From 6fe4cdc06b2deb2b4f090e6c79dcc9c30211e0c7 Mon Sep 17 00:00:00 2001 From: Anish Umale Date: Fri, 19 Jul 2024 03:57:41 +0530 Subject: [PATCH 1/9] add initial scaffolding for running an exploration --- mathesar/utils/explorations.py | 82 ++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/mathesar/utils/explorations.py b/mathesar/utils/explorations.py index db29594623..f5a9e9ca22 100644 --- a/mathesar/utils/explorations.py +++ b/mathesar/utils/explorations.py @@ -1,4 +1,7 @@ from mathesar.models.base import Explorations +from db.queries.base import DBQuery, InitialColumn, JoinParameter +from db.engine import create_future_engine_with_custom_types +from mathesar.state import get_cached_metadata def get_explorations(database_id): @@ -7,3 +10,82 @@ def get_explorations(database_id): def delete_exploration(exploration_id): Explorations.objects.get(id=exploration_id).delete() + + +def run_exploration(exploration_def, conn): + engine = create_future_engine_with_custom_types( + conn.info.user, + conn.info.password, + conn.info.host, + conn.info.dbname, + conn.info.port + ) + initial_columns = exploration_def.get('initial_columns') + processed_initial_columns = [] + for column in initial_columns: + jp_path = column.get("join_path") + if jp_path is not None: + join_path = [ + JoinParameter( + left_oid=i[0][0], + left_attnum=i[0][1], + right_oid=i[1][0], + right_attnum=i[1][1] + ) for i in jp_path + ] + processed_initial_columns.append( + InitialColumn( + reloid=column["table_oid"], + attnum=column["attnum"], + alias=column["alias"], + jp_path=join_path + ) + ) + db_query = DBQuery( + base_table_oid=exploration_def["base_table_oid"], + initial_columns=processed_initial_columns, + engine=engine, + transformations=exploration_def.get("transformations", []), + name=None, + metadata=get_cached_metadata() + ) + return { + "query": (), + "records": db_query.get_records(), + "output_columns": (), + "column_metadata": (), + "parameters": {}, + } + +# { +# "base_table": 7, +# "initial_columns": [ +# { +# "id": 13, +# "alias": "Checkouts_Item" +# }, +# {  +# "id": 6, +# "alias": "Books_Page Count", +# "join_path": [ +# [ +# 13, +# 29 +# ], +# [ +# 20, +# 27 +# ] +# ] +# } +# ], +# "transformations": [], +# "display_names": { +# "Checkouts_Item": "Checkouts_Item", +# "Books_Page Count": "Books_Page Count" +# }, +# "parameters": { +# "limit": 100, +# "offset": 0  +# } +# } From e3e6c58daa8bd675db3af3d88539a91e99336e7b Mon Sep 17 00:00:00 2001 From: Anish Umale Date: Tue, 23 Jul 2024 04:52:16 +0530 Subject: [PATCH 2/9] add sql for getting records for an exploration --- db/sql/00_msar.sql | 78 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/db/sql/00_msar.sql b/db/sql/00_msar.sql index a7c3a1ae53..e3d07a60be 100644 --- a/db/sql/00_msar.sql +++ b/db/sql/00_msar.sql @@ -3453,3 +3453,81 @@ BEGIN RETURN records; END; $$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION +msar.list_records_from_exploration( + exploration jsonb +) RETURNS jsonb AS $$ +DECLARE + records jsonb; + base_table_oid bigint; + columns jsonb; + sel text; +BEGIN + base_table_oid := exploration->'base_table_oid'; + columns := exploration->'initial_columns'; + sel := build_selectable_for_explorations(base_table_oid, columns); +END; +$$ LANGUAGE plpgsql; + + +CREATE OR REPLACE FUNCTION +msar.build_selectable_for_explorations( + base_table_oid bigint, + columns jsonb +) RETURNS text AS $$ +DECLARE + sel text; +BEGIN + WITH col_cte AS ( + SELECT (x->>'id')::smallint AS id, 
x->>'alias' AS alias, x->'join_path' AS jp FROM jsonb_array_elements('[{"id": 2, "alias": "abcd"},{"id": 5, "alias":"efgh", "join_path":[[[2254329, 2], [2254334, 1]],[[2254334, 5], [2254321, 1]]]}]'::jsonb) WITH ORDINALITY AS j(x, idx) + ), jp_cte AS ( + SELECT + (col#>>'{0,0}')::oid AS lt_oid, + (col#>>'{0,1}')::smallint AS lt_attnum, + (col#>>'{1,0}')::oid AS rt_oid, + (col#>>'{1,1}')::smallint AS rt_attnum + FROM (SELECT jsonb_array_elements(jp) FROM col_cte) AS y(col) + ), sel_base_cte AS ( + SELECT + CASE + WHEN col_cte.jp IS NULL + AND has_column_privilege(2254329, col_cte.id, 'SELECT') + THEN + format('msar.format_data(%I) AS %I', msar.get_column_name(2254329, col_cte.id), col_cte.alias) + END as base_sel + FROM col_cte + ), join_text_cte AS ( + SELECT + CASE + WHEN col_cte.jp IS NOT NULL + AND has_column_privilege((col_cte.jp#>>'{-1,-1,0}')::oid, col_cte.id, 'SELECT') + AND has_column_privilege(lt_oid, lt_attnum, 'SELECT') + AND has_column_privilege(rt_oid, rt_attnum, 'SELECT') + THEN + jsonb_build_object( + 'join_sel', + format( + 'msar.format_data(%I.%I) AS %I', + col_cte.jp#>>'{-1,-1,0}', + col_cte.id, + col_cte.alias + ), + 'join_text', + format( + 'LEFT OUTER JOIN %I.%I AS %I ON %I.%I=%I.%I', + msar.get_relation_schema_name(rt_oid), + msar.get_relation_name(rt_oid), + rt_oid, + lt_oid, + msar.get_column_name(lt_oid, lt_attnum), + rt_oid, + msar.get_column_name(rt_oid, rt_attnum) + ) + ) + END AS joins + FROM jp_cte, col_cte + ) SELECT distinct *, joins->>'join_sel', joins->>'join_text' FROM join_text_cte, sel_base_cte; +END; +$$ LANGUAGE plpgsql; From 19e52dbd81728ae97f780197648c4ccbdbc4b6a5 Mon Sep 17 00:00:00 2001 From: Anish Umale Date: Tue, 30 Jul 2024 19:33:01 +0530 Subject: [PATCH 3/9] enrich exploration response with column metadata --- mathesar/utils/explorations.py | 129 ++++++++++++++++++++++----------- 1 file changed, 85 insertions(+), 44 deletions(-) diff --git a/mathesar/utils/explorations.py b/mathesar/utils/explorations.py index f5a9e9ca22..1ace2b5339 100644 --- a/mathesar/utils/explorations.py +++ b/mathesar/utils/explorations.py @@ -1,6 +1,11 @@ -from mathesar.models.base import Explorations -from db.queries.base import DBQuery, InitialColumn, JoinParameter from db.engine import create_future_engine_with_custom_types +from db.records.operations.group import GroupBy +from db.records.operations.select import get_count +from db.queries.base import DBQuery, InitialColumn, JoinParameter +from db.tables.operations.select import get_table +from mathesar.api.utils import process_annotated_records +from mathesar.models.base import Explorations, ColumnMetaData +from mathesar.rpc.columns.metadata import ColumnMetaDataRecord from mathesar.state import get_cached_metadata @@ -12,7 +17,7 @@ def delete_exploration(exploration_id): Explorations.objects.get(id=exploration_id).delete() -def run_exploration(exploration_def, conn): +def run_exploration(exploration_def, database_id, conn): engine = create_future_engine_with_custom_types( conn.info.user, conn.info.password, @@ -20,7 +25,10 @@ def run_exploration(exploration_def, conn): conn.info.dbname, conn.info.port ) + metadata = get_cached_metadata() + base_table_oid = exploration_def["base_table_oid"] initial_columns = exploration_def.get('initial_columns') + params = exploration_def.get('parameters', {}) processed_initial_columns = [] for column in initial_columns: jp_path = column.get("join_path") @@ -35,57 +43,90 @@ def run_exploration(exploration_def, conn): ] processed_initial_columns.append( InitialColumn( - 
reloid=column["table_oid"], + reloid=jp_path[-1][-1][0] if jp_path else base_table_oid, attnum=column["attnum"], alias=column["alias"], - jp_path=join_path + jp_path=join_path if jp_path else None ) ) db_query = DBQuery( - base_table_oid=exploration_def["base_table_oid"], + base_table_oid=base_table_oid, initial_columns=processed_initial_columns, engine=engine, transformations=exploration_def.get("transformations", []), name=None, - metadata=get_cached_metadata() + metadata=metadata + ) + records = db_query.get_records( + limit=params.get('limit', 100), + offset=params.get('offset', 0), + filter=params.get('filter', None), + order_by=params.get('order_by', []), + group_by=GroupBy(**params.get('grouping')) if params.get('grouping', None) else None, + search=params.get('search', []), + duplicate_only=params.get('duplicate_only', None) ) + processed_records = process_annotated_records(records)[0] + with conn: + column_metadata = _get_exploration_column_metadata( + exploration_def, + processed_initial_columns, + database_id, + db_query, + conn, + engine, + metadata + ) return { - "query": (), - "records": db_query.get_records(), - "output_columns": (), - "column_metadata": (), - "parameters": {}, + "query": exploration_def, + "records": { + "count": get_count( + table=db_query.transformed_relation, + engine=engine, + filter=params.get('filter', None) + ), + "grouping": None, + "preview_data": None, + "results": processed_records + }, + "output_columns": tuple(sa_col.name for sa_col in db_query.sa_output_columns), + "column_metadata": column_metadata, + "parameters": params, } -# { -# "base_table": 7, -# "initial_columns": [ -# { -# "id": 13, -# "alias": "Checkouts_Item" -# }, -# {  -# "id": 6, -# "alias": "Books_Page Count", -# "join_path": [ -# [ -# 13, -# 29 -# ], -# [ -# 20, -# 27 -# ] -# ] -# } -# ], -# "transformations": [], -# "display_names": { -# "Checkouts_Item": "Checkouts_Item", -# "Books_Page Count": "Books_Page Count" -# }, -# "parameters": { -# "limit": 100, -# "offset": 0  -# } -# } + +def _get_exploration_column_metadata( + exploration_def, + processed_initial_columns, + database_id, + db_query, + conn, + engine, + metadata +): + exploration_column_metadata = {} + for alias, sa_col in db_query.all_sa_columns_map.items(): + initial_column = None + for col in processed_initial_columns: + if alias == col.alias: + initial_column = col + column_metadata = ColumnMetaData.objects.filter( + database__id=database_id, + table_oid=initial_column.reloid, + attnum=sa_col.column_attnum + ).first() if initial_column else None + input_table_name = get_table(initial_column.reloid, conn)["name"] if initial_column else None + input_column_name = initial_column.get_name(engine, metadata) if initial_column else None + exploration_column_metadata[alias] = { + "alias": alias, + "display_name": exploration_def["display_names"].get(alias), + "type": sa_col.db_type.id, + "type_options": sa_col.type_options, + "display_options": ColumnMetaDataRecord.from_model(column_metadata) if column_metadata else None, + "is_initial_column": True if initial_column else False, + "input_column_name": input_column_name, + "input_table_name": input_table_name, + "input_table_id": initial_column.reloid if initial_column else None, + "input_alias": db_query.get_input_alias_for_output_alias(alias) + } + return exploration_column_metadata From efcad5f10bb697c26c61cd3a48d1b3485b0e74e4 Mon Sep 17 00:00:00 2001 From: Anish Umale Date: Tue, 30 Jul 2024 19:34:37 +0530 Subject: [PATCH 4/9] remove grouping and preview_data keys 
from response --- mathesar/utils/explorations.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/mathesar/utils/explorations.py b/mathesar/utils/explorations.py index 1ace2b5339..c42601378f 100644 --- a/mathesar/utils/explorations.py +++ b/mathesar/utils/explorations.py @@ -85,8 +85,6 @@ def run_exploration(exploration_def, database_id, conn): engine=engine, filter=params.get('filter', None) ), - "grouping": None, - "preview_data": None, "results": processed_records }, "output_columns": tuple(sa_col.name for sa_col in db_query.sa_output_columns), From e9e126edb0f830507ffc598c41576a2f07484353 Mon Sep 17 00:00:00 2001 From: Anish Umale Date: Tue, 30 Jul 2024 19:37:14 +0530 Subject: [PATCH 5/9] revert sql changes --- db/sql/00_msar.sql | 78 ---------------------------------------------- 1 file changed, 78 deletions(-) diff --git a/db/sql/00_msar.sql b/db/sql/00_msar.sql index 2c2ffd272c..aefcc77617 100644 --- a/db/sql/00_msar.sql +++ b/db/sql/00_msar.sql @@ -3496,81 +3496,3 @@ BEGIN RETURN records; END; $$ LANGUAGE plpgsql; - - -CREATE OR REPLACE FUNCTION -msar.list_records_from_exploration( - exploration jsonb -) RETURNS jsonb AS $$ -DECLARE - records jsonb; - base_table_oid bigint; - columns jsonb; - sel text; -BEGIN - base_table_oid := exploration->'base_table_oid'; - columns := exploration->'initial_columns'; - sel := build_selectable_for_explorations(base_table_oid, columns); -END; -$$ LANGUAGE plpgsql; - - -CREATE OR REPLACE FUNCTION -msar.build_selectable_for_explorations( - base_table_oid bigint, - columns jsonb -) RETURNS text AS $$ -DECLARE - sel text; -BEGIN - WITH col_cte AS ( - SELECT (x->>'id')::smallint AS id, x->>'alias' AS alias, x->'join_path' AS jp FROM jsonb_array_elements('[{"id": 2, "alias": "abcd"},{"id": 5, "alias":"efgh", "join_path":[[[2254329, 2], [2254334, 1]],[[2254334, 5], [2254321, 1]]]}]'::jsonb) WITH ORDINALITY AS j(x, idx) - ), jp_cte AS ( - SELECT - (col#>>'{0,0}')::oid AS lt_oid, - (col#>>'{0,1}')::smallint AS lt_attnum, - (col#>>'{1,0}')::oid AS rt_oid, - (col#>>'{1,1}')::smallint AS rt_attnum - FROM (SELECT jsonb_array_elements(jp) FROM col_cte) AS y(col) - ), sel_base_cte AS ( - SELECT - CASE - WHEN col_cte.jp IS NULL - AND has_column_privilege(2254329, col_cte.id, 'SELECT') - THEN - format('msar.format_data(%I) AS %I', msar.get_column_name(2254329, col_cte.id), col_cte.alias) - END as base_sel - FROM col_cte - ), join_text_cte AS ( - SELECT - CASE - WHEN col_cte.jp IS NOT NULL - AND has_column_privilege((col_cte.jp#>>'{-1,-1,0}')::oid, col_cte.id, 'SELECT') - AND has_column_privilege(lt_oid, lt_attnum, 'SELECT') - AND has_column_privilege(rt_oid, rt_attnum, 'SELECT') - THEN - jsonb_build_object( - 'join_sel', - format( - 'msar.format_data(%I.%I) AS %I', - col_cte.jp#>>'{-1,-1,0}', - col_cte.id, - col_cte.alias - ), - 'join_text', - format( - 'LEFT OUTER JOIN %I.%I AS %I ON %I.%I=%I.%I', - msar.get_relation_schema_name(rt_oid), - msar.get_relation_name(rt_oid), - rt_oid, - lt_oid, - msar.get_column_name(lt_oid, lt_attnum), - rt_oid, - msar.get_column_name(rt_oid, rt_attnum) - ) - ) - END AS joins - FROM jp_cte, col_cte - ) SELECT distinct *, joins->>'join_sel', joins->>'join_text' FROM join_text_cte, sel_base_cte; -END; -$$ LANGUAGE plpgsql; From 9d2d7cd86b8f1db97e73d72ad1505a78ee4ba78f Mon Sep 17 00:00:00 2001 From: Anish Umale Date: Tue, 30 Jul 2024 23:25:18 +0530 Subject: [PATCH 6/9] add run endpoint along with docstrings and endpoint test --- docs/docs/api/rpc.md | 3 ++ mathesar/rpc/explorations.py | 70 ++++++++++++++++++++++++++-- 
mathesar/tests/rpc/test_endpoints.py | 5 ++ mathesar/utils/explorations.py | 21 ++++----- 4 files changed, 85 insertions(+), 14 deletions(-) diff --git a/docs/docs/api/rpc.md b/docs/docs/api/rpc.md index 0f969220fb..dad6a1ee61 100644 --- a/docs/docs/api/rpc.md +++ b/docs/docs/api/rpc.md @@ -194,7 +194,10 @@ To use an RPC function: members: - list_ - delete + - run - ExplorationInfo + - ExplorationDef + - ExplorationResult ## Roles diff --git a/mathesar/rpc/explorations.py b/mathesar/rpc/explorations.py index 5ac9bfc168..7313f12af2 100644 --- a/mathesar/rpc/explorations.py +++ b/mathesar/rpc/explorations.py @@ -3,16 +3,17 @@ """ from typing import Optional, TypedDict -from modernrpc.core import rpc_method +from modernrpc.core import rpc_method, REQUEST_KEY from modernrpc.auth.basic import http_basic_auth_login_required from mathesar.rpc.exceptions.handlers import handle_rpc_exceptions -from mathesar.utils.explorations import get_explorations, delete_exploration +from mathesar.rpc.utils import connect +from mathesar.utils.explorations import get_explorations, delete_exploration, run_exploration class ExplorationInfo(TypedDict): """ - Information about a Exploration. + Information about an exploration. Attributes: id: The Django id of an exploration. @@ -50,6 +51,52 @@ def from_model(cls, model): ) +class ExplorationDef(TypedDict): + """ + Definition about a runnable exploration. + + Attributes: + base_table_oid: The OID of the base table of the exploration on the database. + initial_columns: A list describing the columns to be included in the exploration. + display_names: A map between the actual column names on the database and the alias to be displayed. + transformations: A list describing the transformations to be made on the included columns. + parameters: A dict describing the properties to be applied while retrieving records e.g. limit, offset, filter, order_by, etc. + """ + base_table_oid: int + initial_columns: list + display_names: dict + transformations: Optional[list] + parameters: Optional[dict] + + +class ExplorationResult(TypedDict): + """ + Result of a ran exploration. + + Attributes: + query: A dict describing the exploration that ran. + records: A dict describing the total count of records along with the contents of those records. + output_columns: A tuple describing the names of the columns included in the exploration. + column_metadata: A dict describing the metadata applied to included columns. + parameters: A dict describing the properties applied while retrieving records e.g. limit, offset, filter, order_by, etc. + """ + query: dict + records: dict + output_columns: tuple + column_metadata: dict + parameters: dict + + @classmethod + def from_dict(cls, e): + return cls( + query=e["query"], + records=e["records"], + output_columns=e["output_columns"], + column_metadata=e["column_metadata"], + parameters=e["parameters"] + ) + + @rpc_method(name="explorations.list") @http_basic_auth_login_required @handle_rpc_exceptions @@ -78,3 +125,20 @@ def delete(*, exploration_id: int, **kwargs) -> None: exploration_id: The Django id of the exploration to delete. """ delete_exploration(exploration_id) + + +@rpc_method(name="explorations.run") +@http_basic_auth_login_required +@handle_rpc_exceptions +def run(*, exploration_def: ExplorationDef, database_id: int, **kwargs) -> ExplorationResult: + """ + Run an exploration. + + Args: + exploration_def: A dict describing an exploration to run. + database_id: The Django id of the database containing the base table for the exploration. 
+ """ + user = kwargs.get(REQUEST_KEY).user + with connect(database_id, user) as conn: + exploration_result = run_exploration(exploration_def, database_id, conn) + return ExplorationResult.from_dict(exploration_result) diff --git a/mathesar/tests/rpc/test_endpoints.py b/mathesar/tests/rpc/test_endpoints.py index 91db2de782..99817ea762 100644 --- a/mathesar/tests/rpc/test_endpoints.py +++ b/mathesar/tests/rpc/test_endpoints.py @@ -159,6 +159,11 @@ "explorations.delete", [user_is_authenticated] ), + ( + explorations.run, + "explorations.run", + [user_is_authenticated] + ) ( roles.list_, "roles.list", diff --git a/mathesar/utils/explorations.py b/mathesar/utils/explorations.py index c42601378f..97d88ecce7 100644 --- a/mathesar/utils/explorations.py +++ b/mathesar/utils/explorations.py @@ -27,7 +27,7 @@ def run_exploration(exploration_def, database_id, conn): ) metadata = get_cached_metadata() base_table_oid = exploration_def["base_table_oid"] - initial_columns = exploration_def.get('initial_columns') + initial_columns = exploration_def['initial_columns'] params = exploration_def.get('parameters', {}) processed_initial_columns = [] for column in initial_columns: @@ -67,16 +67,15 @@ def run_exploration(exploration_def, database_id, conn): duplicate_only=params.get('duplicate_only', None) ) processed_records = process_annotated_records(records)[0] - with conn: - column_metadata = _get_exploration_column_metadata( - exploration_def, - processed_initial_columns, - database_id, - db_query, - conn, - engine, - metadata - ) + column_metadata = _get_exploration_column_metadata( + exploration_def, + processed_initial_columns, + database_id, + db_query, + conn, + engine, + metadata + ) return { "query": exploration_def, "records": { From 7722fccc2a648a63d01379a60f12c9b6db2a9c95 Mon Sep 17 00:00:00 2001 From: Anish Umale Date: Tue, 30 Jul 2024 23:48:53 +0530 Subject: [PATCH 7/9] fix test --- mathesar/tests/rpc/test_endpoints.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mathesar/tests/rpc/test_endpoints.py b/mathesar/tests/rpc/test_endpoints.py index 99817ea762..6383d28b3a 100644 --- a/mathesar/tests/rpc/test_endpoints.py +++ b/mathesar/tests/rpc/test_endpoints.py @@ -163,7 +163,7 @@ explorations.run, "explorations.run", [user_is_authenticated] - ) + ), ( roles.list_, "roles.list", From 4cc4c144a3c634bffdd57e97ee6eec2da3239e39 Mon Sep 17 00:00:00 2001 From: Anish Umale Date: Thu, 1 Aug 2024 02:02:06 +0530 Subject: [PATCH 8/9] split parameters --- mathesar/rpc/explorations.py | 54 ++++++++++++++++++++++++++++++---- mathesar/utils/explorations.py | 24 ++++++++------- 2 files changed, 62 insertions(+), 16 deletions(-) diff --git a/mathesar/rpc/explorations.py b/mathesar/rpc/explorations.py index 7313f12af2..d197b27d1b 100644 --- a/mathesar/rpc/explorations.py +++ b/mathesar/rpc/explorations.py @@ -60,31 +60,68 @@ class ExplorationDef(TypedDict): initial_columns: A list describing the columns to be included in the exploration. display_names: A map between the actual column names on the database and the alias to be displayed. transformations: A list describing the transformations to be made on the included columns. - parameters: A dict describing the properties to be applied while retrieving records e.g. limit, offset, filter, order_by, etc. + limit: Specifies the number of rows to return.(default 100) + offset: Specifies the number of rows to skip.(default 0) + filter: A dict describing filters to be applied to an exploration. + e.g. 
Here is a dict describing getting records from exploration where "col1" = NULL and "col2" = "abc" + ``` + {"and": [ + {"null": [ + {"column_name": ["col1"]}, + ]}, + {"equal": [ + {"to_lowercase": [ + {"column_name": ["col2"]}, + ]}, + {"literal": ["abc"]}, + ]}, + ]} + ``` + Refer to db/functions/base.py for all the possible filters. + order_by: A list of dicts, where each dict has a `field` and `direction` field. + Here the value for `field` should be column name and `direction` should be either `asc` or `desc`. + search: A list of dicts, where each dict has a `column` and `literal` field. + Here the value for `column` should be a column name and `literal` should be a string to be searched in the aforementioned column. + duplicate_only: A list of column names for which you want duplicate records. """ base_table_oid: int initial_columns: list display_names: dict transformations: Optional[list] - parameters: Optional[dict] + limit: Optional[int] + offset: Optional[int] + filter: Optional[dict] + order_by: Optional[list[dict]] + search: Optional[list[dict]] + duplicate_only: Optional[list] class ExplorationResult(TypedDict): """ - Result of a ran exploration. + Result of an exploration run. Attributes: query: A dict describing the exploration that ran. records: A dict describing the total count of records along with the contents of those records. output_columns: A tuple describing the names of the columns included in the exploration. column_metadata: A dict describing the metadata applied to included columns. - parameters: A dict describing the properties applied while retrieving records e.g. limit, offset, filter, order_by, etc. + limit: Specifies the max number of rows returned.(default 100) + offset: Specifies the number of rows skipped.(default 0) + filter: A dict describing filters applied to an exploration. + order_by: The ordering applied to the columns of an exploration. + search: Specifies a list of dicts containing column names and searched expression. + duplicate_only: A list of column names for which you want duplicate records. 
""" query: dict records: dict output_columns: tuple column_metadata: dict - parameters: dict + limit: Optional[int] + offset: Optional[int] + filter: Optional[dict] + order_by: Optional[list[dict]] + search: Optional[list[dict]] + duplicate_only: Optional[list] @classmethod def from_dict(cls, e): @@ -93,7 +130,12 @@ def from_dict(cls, e): records=e["records"], output_columns=e["output_columns"], column_metadata=e["column_metadata"], - parameters=e["parameters"] + limit=e["limit"], + offset=e["offset"], + filter=e["filter"], + order_by=e["order_by"], + search=e["search"], + duplicate_only=e["duplicate_only"] ) diff --git a/mathesar/utils/explorations.py b/mathesar/utils/explorations.py index 97d88ecce7..988b12a6e5 100644 --- a/mathesar/utils/explorations.py +++ b/mathesar/utils/explorations.py @@ -28,7 +28,6 @@ def run_exploration(exploration_def, database_id, conn): metadata = get_cached_metadata() base_table_oid = exploration_def["base_table_oid"] initial_columns = exploration_def['initial_columns'] - params = exploration_def.get('parameters', {}) processed_initial_columns = [] for column in initial_columns: jp_path = column.get("join_path") @@ -58,13 +57,13 @@ def run_exploration(exploration_def, database_id, conn): metadata=metadata ) records = db_query.get_records( - limit=params.get('limit', 100), - offset=params.get('offset', 0), - filter=params.get('filter', None), - order_by=params.get('order_by', []), - group_by=GroupBy(**params.get('grouping')) if params.get('grouping', None) else None, - search=params.get('search', []), - duplicate_only=params.get('duplicate_only', None) + limit=exploration_def.get('limit', 100), + offset=exploration_def.get('offset', 0), + filter=exploration_def.get('filter', None), + order_by=exploration_def.get('order_by', []), + group_by=GroupBy(**exploration_def.get('grouping')) if exploration_def.get('grouping', None) else None, + search=exploration_def.get('search', []), + duplicate_only=exploration_def.get('duplicate_only', None) ) processed_records = process_annotated_records(records)[0] column_metadata = _get_exploration_column_metadata( @@ -82,13 +81,18 @@ def run_exploration(exploration_def, database_id, conn): "count": get_count( table=db_query.transformed_relation, engine=engine, - filter=params.get('filter', None) + filter=exploration_def.get('filter', None) ), "results": processed_records }, "output_columns": tuple(sa_col.name for sa_col in db_query.sa_output_columns), "column_metadata": column_metadata, - "parameters": params, + "limit": exploration_def.get('limit', 100), + "offset": exploration_def.get('offset', 0), + "filter": exploration_def.get('filter', None), + "order_by": exploration_def.get('order_by', []), + "search": exploration_def.get('search', []), + "duplicate_only": exploration_def.get('duplicate_only', None) } From 6e0c4356515a2b3308e49d425f0c4fb056c6adb6 Mon Sep 17 00:00:00 2001 From: Anish Umale Date: Thu, 1 Aug 2024 02:09:40 +0530 Subject: [PATCH 9/9] remove grouping from get_records --- mathesar/utils/explorations.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/mathesar/utils/explorations.py b/mathesar/utils/explorations.py index 988b12a6e5..6767718ab2 100644 --- a/mathesar/utils/explorations.py +++ b/mathesar/utils/explorations.py @@ -1,5 +1,4 @@ from db.engine import create_future_engine_with_custom_types -from db.records.operations.group import GroupBy from db.records.operations.select import get_count from db.queries.base import DBQuery, InitialColumn, JoinParameter from db.tables.operations.select import get_table 
@@ -61,7 +60,6 @@ def run_exploration(exploration_def, database_id, conn): offset=exploration_def.get('offset', 0), filter=exploration_def.get('filter', None), order_by=exploration_def.get('order_by', []), - group_by=GroupBy(**exploration_def.get('grouping')) if exploration_def.get('grouping', None) else None, search=exploration_def.get('search', []), duplicate_only=exploration_def.get('duplicate_only', None) )
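With the whole series applied, a caller exercises the feature by handing `explorations.run` a payload shaped like the sketch below. This is only an illustration: the OIDs and attnums are the sample values hard-coded in the patch 2 SQL experiment (later reverted), the aliases come from the JSON comment in patch 1, and the surrounding call is paraphrased from the `run` RPC method rather than copied from a real client.

```python
# Illustrative payload only -- table OIDs, attnums, and aliases are placeholders.
# The shape follows ExplorationDef after patch 8 (limit/offset/filter/order_by/
# search/duplicate_only live at the top level; there is no "parameters" dict).
exploration_def = {
    "base_table_oid": 2254329,
    "initial_columns": [
        {"attnum": 2, "alias": "Checkouts_Item"},
        {
            "attnum": 5,
            "alias": "Books_Page Count",
            # Each hop is [[left_oid, left_attnum], [right_oid, right_attnum]];
            # run_exploration resolves the column's table from the last hop.
            "join_path": [
                [[2254329, 2], [2254334, 1]],
                [[2254334, 5], [2254321, 1]],
            ],
        },
    ],
    "display_names": {
        "Checkouts_Item": "Checkouts_Item",
        "Books_Page Count": "Books_Page Count",
    },
    "transformations": [],
    "limit": 100,
    "offset": 0,
}

# Inside explorations.run this becomes, roughly:
#
#     with connect(database_id, user) as conn:
#         result = run_exploration(exploration_def, database_id, conn)
#
# and `result` carries the keys described by ExplorationResult: "query",
# "records" (count + results), "output_columns", "column_metadata", plus the
# flattened limit/offset/filter/order_by/search/duplicate_only values.
```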