From 92d5ea65bc7c982813e1da19be4663ef6302fb48 Mon Sep 17 00:00:00 2001 From: Matt Shaver <60105315+matthewshaver@users.noreply.github.com> Date: Mon, 28 Oct 2024 16:05:49 -0400 Subject: [PATCH 1/3] DO NOT MERGE UNTIL NOV 1. 1.6 removal from docs --- contributing/single-sourcing-content.md | 2 +- website/dbt-versions.js | 134 +---------------- .../docs/docs/build/metricflow-time-spine.md | 139 ------------------ website/docs/docs/build/packages.md | 28 ---- website/docs/docs/build/saved-queries.md | 3 - website/docs/docs/build/semantic-models.md | 4 - .../about-core-connections.md | 4 - .../core/connect-data-platform/spark-setup.md | 5 - .../reference/artifacts/run-results-json.md | 4 - website/docs/reference/commands/cmd-docs.md | 4 - website/docs/reference/commands/deps.md | 4 - website/docs/reference/commands/init.md | 5 - website/docs/reference/dbt_project.yml.md | 73 --------- .../reference/global-configs/resource-type.md | 18 +-- .../docs/reference/node-selection/methods.md | 6 - .../docs/reference/resource-configs/access.md | 10 -- .../resource-configs/bigquery-configs.md | 7 - .../reference/resource-configs/contract.md | 9 -- .../resource-configs/databricks-configs.md | 26 +--- .../reference/resource-configs/enabled.md | 20 --- .../docs/reference/resource-configs/group.md | 20 --- .../docs/reference/resource-configs/meta.md | 20 --- .../resource-configs/postgres-configs.md | 17 --- .../resource-configs/redshift-configs.md | 15 -- .../resource-configs/snowflake-configs.md | 27 ---- .../reference/resource-properties/config.md | 20 --- .../resource-properties/freshness.md | 24 +-- .../src/theme/DocRoot/Layout/Main/index.js | 2 +- 28 files changed, 6 insertions(+), 644 deletions(-) diff --git a/contributing/single-sourcing-content.md b/contributing/single-sourcing-content.md index 537980ebdfb..6dc14d760b1 100644 --- a/contributing/single-sourcing-content.md +++ b/contributing/single-sourcing-content.md @@ -90,7 +90,7 @@ This component can be added directly to a markdown file in a similar way as othe Both properties can be used together to set a range where the content should show. 
In the example below, this content will only show if the selected version is between **0.21** and **1.0**: ```markdown - + Versioned content here diff --git a/website/dbt-versions.js b/website/dbt-versions.js index 9007d719bc0..baa6be6903d 100644 --- a/website/dbt-versions.js +++ b/website/dbt-versions.js @@ -28,11 +28,7 @@ exports.versions = [ }, { version: "1.7", - EOLDate: "2024-10-30", - }, - { - version: "1.6", - EOLDate: "2024-07-31", + EOLDate: "2024-11-01", }, ]; @@ -66,134 +62,6 @@ exports.versionedPages = [ page: "reference/global-configs/indirect-selection", firstVersion: "1.8", }, - { - page: "reference/resource-configs/store_failures_as", - firstVersion: "1.7", - }, - { - page: "docs/build/build-metrics-intro", - firstVersion: "1.6", - }, - { - page: "docs/build/sl-getting-started", - firstVersion: "1.6", - }, - { - page: "docs/build/about-metricflow", - firstVersion: "1.6", - }, - { - page: "docs/build/join-logic", - firstVersion: "1.6", - }, - { - page: "docs/build/validation", - firstVersion: "1.6", - }, - { - page: "docs/build/semantic-models", - firstVersion: "1.6", - }, - { - page: "docs/build/group-by", - firstVersion: "1.6", - }, - { - page: "docs/build/entities", - firstVersion: "1.6", - }, - { - page: "docs/build/metrics-overview", - firstVersion: "1.6", - }, - { - page: "docs/build/cumulative", - firstVersion: "1.6", - }, - { - page: "docs/build/derived", - firstVersion: "1.6", - }, - { - page: "docs/build/measure-proxy", - firstVersion: "1.6", - }, - { - page: "docs/build/ratio", - firstVersion: "1.6", - }, - { - page: "reference/commands/clone", - firstVersion: "1.6", - }, - { - page: "docs/collaborate/govern/project-dependencies", - firstVersion: "1.6", - }, - { - page: "reference/dbt-jinja-functions/thread_id", - firstVersion: "1.6", - }, - { - page: "reference/resource-properties/deprecation_date", - firstVersion: "1.6", - }, - { - page: "reference/commands/retry", - firstVersion: "1.6", - }, - { - page: "docs/build/groups", - firstVersion: "1.5", - }, - { - page: "docs/collaborate/govern/model-contracts", - firstVersion: "1.5", - }, - { - page: "reference/commands/show", - firstVersion: "1.5", - }, - { - page: "docs/collaborate/govern/model-access", - firstVersion: "1.5", - }, - { - page: "docs/collaborate/govern/model-versions", - firstVersion: "1.5", - }, - { - page: "reference/programmatic-invocations", - firstVersion: "1.5", - }, - { - page: "reference/resource-configs/contract", - firstVersion: "1.5", - }, - { - page: "reference/resource-configs/group", - firstVersion: "1.5", - }, - { - page: "reference/resource-properties/access", - firstVersion: "1.5", - }, - { - page: "reference/resource-properties/constraints", - firstVersion: "1.5", - }, - { - page: "reference/resource-properties/latest_version", - firstVersion: "1.5", - }, - { - page: "reference/resource-properties/versions", - firstVersion: "1.5", - }, - { - page: "reference/resource-configs/on_configuration_change", - firstVersion: "1.6", - }, ]; /** diff --git a/website/docs/docs/build/metricflow-time-spine.md b/website/docs/docs/build/metricflow-time-spine.md index 9932a35839c..50d1d68d0bd 100644 --- a/website/docs/docs/build/metricflow-time-spine.md +++ b/website/docs/docs/build/metricflow-time-spine.md @@ -124,42 +124,6 @@ For an example project, refer to our [Jaffle shop](https://github.com/dbt-labs/j - - -```sql -{{ - config( - materialized = 'table', - ) -}} - -with days as ( - - {{ - dbt_utils.date_spine( - 'day', - "to_date('01/01/2000','mm/dd/yyyy')", - 
"to_date('01/01/2025','mm/dd/yyyy')" - ) - }} - -), - -final as ( - select cast(date_day as date) as date_day - from days -) - -select * from final --- filter the time spine to a specific range -where date_day > dateadd(year, -4, current_timestamp()) -and date_hour < dateadd(day, 30, current_timestamp()) -``` - - - - - ```sql {{ config( @@ -189,42 +153,9 @@ where date_day > dateadd(year, -4, current_timestamp()) and date_hour < dateadd(day, 30, current_timestamp()) ``` - - ### Daily (BigQuery) Use this model if you're using BigQuery. BigQuery supports `DATE()` instead of `TO_DATE()`: - - - - -```sql -{{config(materialized='table')}} -with days as ( - {{dbt_utils.date_spine( - 'day', - "DATE(2000,01,01)", - "DATE(2025,01,01)" - ) - }} -), - -final as ( - select cast(date_day as date) as date_day - from days -) - -select * -from final --- filter the time spine to a specific range -where date_day > dateadd(year, -4, current_timestamp()) -and date_hour < dateadd(day, 30, current_timestamp()) -``` - - - - - @@ -253,7 +184,6 @@ and date_hour < dateadd(day, 30, current_timestamp()) ``` - @@ -306,42 +236,6 @@ To create this table, you need to create a model in your dbt project called `met ### Daily - - - -```sql -{{ - config( - materialized = 'table', - ) -}} - -with days as ( - - {{ - dbt_utils.date_spine( - 'day', - "to_date('01/01/2000','mm/dd/yyyy')", - "to_date('01/01/2025','mm/dd/yyyy')" - ) - }} - -), - -final as ( - select cast(date_day as date) as date_day - from days -) - -select * from final --- filter the time spine to a specific range -where date_day > dateadd(year, -4, current_timestamp()) -and date_hour < dateadd(day, 30, current_timestamp()) -``` - - - - @@ -375,43 +269,11 @@ and date_hour < dateadd(day, 30, current_timestamp()) ``` - ### Daily (BigQuery) Use this model if you're using BigQuery. BigQuery supports `DATE()` instead of `TO_DATE()`: - - - - -```sql -{{config(materialized='table')}} -with days as ( - {{dbt_utils.date_spine( - 'day', - "DATE(2000,01,01)", - "DATE(2025,01,01)" - ) - }} -), - -final as ( - select cast(date_day as date) as date_day - from days -) - -select * -from final --- filter the time spine to a specific range -where date_day > dateadd(year, -4, current_timestamp()) -and date_hour < dateadd(day, 30, current_timestamp()) -``` - - - - - ```sql @@ -438,7 +300,6 @@ and date_hour < dateadd(day, 30, current_timestamp()) ``` - You only need to include the `date_day` column in the table. MetricFlow can handle broader levels of detail, but finer grains are only supported in versions 1.9 and higher. diff --git a/website/docs/docs/build/packages.md b/website/docs/docs/build/packages.md index b4880d88794..f0cd0095a11 100644 --- a/website/docs/docs/build/packages.md +++ b/website/docs/docs/build/packages.md @@ -83,12 +83,8 @@ packages: version: [">=0.7.0", "<0.8.0"] ``` - - Beginning in v1.7, `dbt deps` "pins" each package by default. See ["Pinning packages"](#pinning-packages) for details. - - Where possible, we recommend installing packages via dbt Hub, since this allows dbt to handle duplicate dependencies. This is helpful in situations such as: * Your project uses both the dbt-utils and Snowplow packages, and the Snowplow package _also_ uses the dbt-utils package. * Your project uses both the Snowplow and Stripe packages, both of which use the dbt-utils package. 
@@ -146,18 +142,8 @@ packages: revision: 4e28d6da126e2940d17f697de783a717f2503188 ``` - - -We **strongly recommend** ["pinning" your packages](#pinning-packages) to a specific release by specifying a release name. - - - - - By default, `dbt deps` "pins" each package. See ["Pinning packages"](#pinning-packages) for details. - - ### Internally hosted tarball URL Some organizations have security requirements to pull resources only from internal services. To address the need to install packages from hosted environments such as Artifactory or cloud storage buckets, dbt Core enables you to install packages from internally-hosted tarball URLs. @@ -319,18 +305,6 @@ When you remove a package from your `packages.yml` file, it isn't automatically ### Pinning packages - - -We **strongly recommend** "pinning" your package to a specific release by specifying a tagged release name or a specific commit hash. - -If you do not provide a revision, or if you use the main branch, then any updates to the package will be incorporated into your project the next time you run `dbt deps`. While we generally try to avoid making breaking changes to these packages, they are sometimes unavoidable. Pinning a package revision helps prevent your code from changing without your explicit approval. - -To find the latest release for a package, navigate to the `Releases` tab in the relevant GitHub repository. For example, you can find all of the releases for the dbt-utils package [here](https://github.com/dbt-labs/dbt-utils/releases). - - - - - Beginning with v1.7, running [`dbt deps`](/reference/commands/deps) "pins" each package by creating or updating the `package-lock.yml` file in the _project_root_ where `packages.yml` is recorded. - The `package-lock.yml` file contains a record of all packages installed. @@ -338,8 +312,6 @@ Beginning with v1.7, running [`dbt deps`](/reference/commands/deps) "pins" each For example, if you use a branch name, the `package-lock.yml` file pins to the head commit. If you use a version range, it pins to the latest release. In either case, subsequent commits or versions will **not** be installed. To get new commits or versions, run `dbt deps --upgrade` or add `package-lock.yml` to your .gitignore file. - - As of v0.14.0, dbt will warn you if you install a package using the `git` syntax without specifying a revision (see below). ### Configuring packages diff --git a/website/docs/docs/build/saved-queries.md b/website/docs/docs/build/saved-queries.md index 649885f9506..ed56d13dcc9 100644 --- a/website/docs/docs/build/saved-queries.md +++ b/website/docs/docs/build/saved-queries.md @@ -154,8 +154,6 @@ saved_queries: - - #### Project-level saved queries To enable saved queries at the project level, you can set the `saved-queries` configuration in the [`dbt_project.yml` file](/reference/dbt_project.yml). This saves you time in configuring saved queries in each file: @@ -171,7 +169,6 @@ saved-queries: For more information on `dbt_project.yml` and config naming conventions, see the [dbt_project.yml reference page](/reference/dbt_project.yml#naming-convention). - To build `saved_queries`, use the [`--resource-type` flag](/reference/global-configs/resource-type) and run the command `dbt build --resource-type saved_query`. 
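For example, a couple of command-line sketches of that flag in action:

```shell
# build only the saved queries in the project
dbt build --resource-type saved_query

# preview which saved queries would be selected
dbt list --resource-type saved_query
```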
diff --git a/website/docs/docs/build/semantic-models.md b/website/docs/docs/build/semantic-models.md
index d683d7cd020..609d7f1ff8d 100644
--- a/website/docs/docs/build/semantic-models.md
+++ b/website/docs/docs/build/semantic-models.md
@@ -119,8 +119,6 @@ semantic_models:
        type: categorical
```

-
-
Semantic models support the [`meta`](/reference/resource-configs/meta), [`group`](/reference/resource-configs/group), and [`enabled`](/reference/resource-configs/enabled) configs, set under the [`config`](/reference/resource-properties/config) property in either the schema file or at the project level:

- Semantic model config in `models/semantic.yml`:
@@ -148,8 +146,6 @@ Semantic models support the [`meta`](/reference/resource-configs/meta), [`group`](/r

For more information on `dbt_project.yml` and config naming conventions, see the [dbt_project.yml reference page](/reference/dbt_project.yml#naming-convention).

-
-
### Name

Define the name of the semantic model. You must define a unique name for the semantic model. The semantic graph will use this name to identify the model, and you can update it at any time. Avoid using double underscores (\_\_) in the name as they're not supported.
diff --git a/website/docs/docs/core/connect-data-platform/about-core-connections.md b/website/docs/docs/core/connect-data-platform/about-core-connections.md
index 461aeea2e87..221f495d054 100644
--- a/website/docs/docs/core/connect-data-platform/about-core-connections.md
+++ b/website/docs/docs/core/connect-data-platform/about-core-connections.md
@@ -32,8 +32,6 @@ If you're using dbt from the command line (CLI), you'll need a profiles.yml file

For detailed info, you can refer to the [Connection profiles](/docs/core/connect-data-platform/connection-profiles).

-
-
## Adapter features

The following table lists the features available for adapters:
@@ -55,5 +53,3 @@ For adapters that support it, you can partially build the catalog. This allows t

### Source freshness

You can measure source freshness using the warehouse metadata tables on supported adapters. This allows for calculating source freshness without using the [`loaded_at_field`](/reference/resource-properties/freshness#loaded_at_field) and without querying the table directly. This is faster and more flexible (though it might sometimes be inaccurate, depending on how the warehouse tracks altered tables). You can override this with the `loaded_at_field` in the [source config](/reference/source-configs). If the adapter doesn't support this, you can still use the `loaded_at_field`.
-
-
diff --git a/website/docs/docs/core/connect-data-platform/spark-setup.md b/website/docs/docs/core/connect-data-platform/spark-setup.md
index 01318211c8f..611642e91b7 100644
--- a/website/docs/docs/core/connect-data-platform/spark-setup.md
+++ b/website/docs/docs/core/connect-data-platform/spark-setup.md
@@ -197,14 +197,9 @@ connect_retries: 3
-
-
-
-
### Server side configuration

Spark can be customized using [Application Properties](https://spark.apache.org/docs/latest/configuration.html). These properties let you customize execution, for example, by allocating more memory to the driver process. They can also configure the Spark SQL runtime, for example, by letting the user [set Spark catalogs](https://spark.apache.org/docs/latest/configuration.html#spark-sql).
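As a minimal sketch of this, assuming your connection method supports the `server_side_parameters` profile field, you can pass Application Properties through the profile; the host, schema, and property values below are illustrative:

```yml
your_profile_name:
  target: dev
  outputs:
    dev:
      type: spark
      method: thrift
      host: 127.0.0.1
      port: 10001
      schema: analytics
      # forward Spark Application Properties to the session
      server_side_parameters:
        "spark.driver.memory": "4g"
```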
- ## Caveats diff --git a/website/docs/reference/artifacts/run-results-json.md b/website/docs/reference/artifacts/run-results-json.md index ff8da3559fa..13ad528d185 100644 --- a/website/docs/reference/artifacts/run-results-json.md +++ b/website/docs/reference/artifacts/run-results-json.md @@ -44,8 +44,6 @@ import RowsAffected from '/snippets/_run-result.md'; - - The run_results.json includes three attributes related to the `applied` state that complement `unique_id`: - `compiled`: Boolean entry of the node compilation status (`False` after parsing, but `True` after compiling). @@ -195,5 +193,3 @@ Here's a printed snippet from the `run_results.json`: } ], ``` - - diff --git a/website/docs/reference/commands/cmd-docs.md b/website/docs/reference/commands/cmd-docs.md index f20da08a4ae..03e11ae89f0 100644 --- a/website/docs/reference/commands/cmd-docs.md +++ b/website/docs/reference/commands/cmd-docs.md @@ -20,8 +20,6 @@ The command is responsible for generating your project's documentation website b dbt docs generate ``` - - Use the `--select` argument to limit the nodes included within `catalog.json`. When this flag is provided, step (3) will be restricted to the selected nodes. All other nodes will be excluded. Step (2) is unaffected. **Example**: @@ -30,8 +28,6 @@ Use the `--select` argument to limit the nodes included within `catalog.json`. W dbt docs generate --select +orders ``` - - Use the `--no-compile` argument to skip re-compilation. When this flag is provided, `dbt docs generate` will skip step (2) described above. **Example**: diff --git a/website/docs/reference/commands/deps.md b/website/docs/reference/commands/deps.md index 85c103e6337..0cb8e50f7a6 100644 --- a/website/docs/reference/commands/deps.md +++ b/website/docs/reference/commands/deps.md @@ -58,8 +58,6 @@ Updates available for packages: ['tailsdotcom/dbt_artifacts', 'dbt-labs/snowplow Update your versions in packages.yml, then run dbt deps ``` - - ## Predictable package installs Starting in dbt Core v1.7, dbt generates a `package-lock.yml` file in the root of your project. This contains the complete set of resolved packages based on the `packages` configuration in `dependencies.yml` or `packages.yml`. Each subsequent invocation of `dbt deps` will install from the _locked_ set of packages specified in this file. Storing the complete set of required packages (with pinned versions) in version-controlled code ensures predictable installs in production and consistency across all developers and environments. @@ -97,5 +95,3 @@ dbt deps --add-package https://github.com/fivetran/dbt_amplitude@v0.3.0 --source # add package from local dbt deps --add-package /opt/dbt/redshift --source local ``` - - diff --git a/website/docs/reference/commands/init.md b/website/docs/reference/commands/init.md index 8945eb823db..112fff63a38 100644 --- a/website/docs/reference/commands/init.md +++ b/website/docs/reference/commands/init.md @@ -17,15 +17,10 @@ Then, it will: - Create a new folder with your project name and sample files, enough to get you started with dbt - Create a connection profile on your local machine. The default location is `~/.dbt/profiles.yml`. Read more in [configuring your profile](/docs/core/connect-data-platform/connection-profiles). - - When using `dbt init` to initialize your project, include the `--profile` flag to specify an existing `profiles.yml` as the `profile:` key to use instead of creating a new one. For example, `dbt init --profile profile_name`. 
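A minimal sketch of that workflow, where the project and profile names are placeholders:

```shell
# scaffold a new project that reuses an existing entry in ~/.dbt/profiles.yml
dbt init my_project --profile my_existing_profile
```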
- - If the profile does not exist in `profiles.yml` or the command is run inside an existing project, the command raises an error. - ## Existing project diff --git a/website/docs/reference/dbt_project.yml.md b/website/docs/reference/dbt_project.yml.md index e7cd5bbeb79..1bb9dd2cf9c 100644 --- a/website/docs/reference/dbt_project.yml.md +++ b/website/docs/reference/dbt_project.yml.md @@ -14,8 +14,6 @@ Every [dbt project](/docs/build/projects) needs a `dbt_project.yml` file — thi The following example is a list of all available configurations in the `dbt_project.yml` file: - - ```yml @@ -94,77 +92,6 @@ vars: ``` - - - - - - -```yml -[name](/reference/project-configs/name): string - -[config-version](/reference/project-configs/config-version): 2 -[version](/reference/project-configs/version): version - -[profile](/reference/project-configs/profile): profilename - -[model-paths](/reference/project-configs/model-paths): [directorypath] -[seed-paths](/reference/project-configs/seed-paths): [directorypath] -[test-paths](/reference/project-configs/test-paths): [directorypath] -[analysis-paths](/reference/project-configs/analysis-paths): [directorypath] -[macro-paths](/reference/project-configs/macro-paths): [directorypath] -[snapshot-paths](/reference/project-configs/snapshot-paths): [directorypath] -[docs-paths](/reference/project-configs/docs-paths): [directorypath] -[asset-paths](/reference/project-configs/asset-paths): [directorypath] - -[packages-install-path](/reference/project-configs/packages-install-path): directorypath - -[clean-targets](/reference/project-configs/clean-targets): [directorypath] - -[query-comment](/reference/project-configs/query-comment): string - -[require-dbt-version](/reference/project-configs/require-dbt-version): version-range | [version-range] - -[dbt-cloud](/docs/cloud/cloud-cli-installation): - [project-id](/docs/cloud/configure-cloud-cli#configure-the-dbt-cloud-cli): project_id # Required - [defer-env-id](/docs/cloud/about-cloud-develop-defer#defer-in-dbt-cloud-cli): environment_id # Optional - -[quoting](/reference/project-configs/quoting): - database: true | false - schema: true | false - identifier: true | false - -models: - [](/reference/model-configs) - -seeds: - [](/reference/seed-configs) - -snapshots: - [](/reference/snapshot-configs) - -sources: - [](source-configs) - -tests: - [](/reference/data-test-configs) - -vars: - [](/docs/build/project-variables) - -[on-run-start](/reference/project-configs/on-run-start-on-run-end): sql-statement | [sql-statement] -[on-run-end](/reference/project-configs/on-run-start-on-run-end): sql-statement | [sql-statement] - -[dispatch](/reference/project-configs/dispatch-config): - - macro_namespace: packagename - search_order: [packagename] - -[restrict-access](/docs/collaborate/govern/model-access): true | false - -``` - - - ## Naming convention diff --git a/website/docs/reference/global-configs/resource-type.md b/website/docs/reference/global-configs/resource-type.md index 9e6ec82df06..431b6c049cb 100644 --- a/website/docs/reference/global-configs/resource-type.md +++ b/website/docs/reference/global-configs/resource-type.md @@ -24,20 +24,7 @@ The `--exclude-resource-type` flag is only available in dbt version 1.8 and high The available resource types are: - - -- [`analysis`](/docs/build/analyses) -- [`exposure`](/docs/build/exposures) -- [`metric`](/docs/build/metrics-overview) -- [`model`](/docs/build/models) -- [`seed`](/docs/build/seeds) -- [`snapshot`](/docs/build/snapshots) -- [`source`](/docs/build/sources) -- 
[`test`](/docs/build/data-tests) - - - - + - [`analysis`](/docs/build/analyses) - [`exposure`](/docs/build/exposures) @@ -82,7 +69,6 @@ Instead of targeting specific resources, use the `--resource-flag` or `--exclude - - In this example, run the following command to include _all_ saved queries with the `--resource-type` flag: @@ -94,8 +80,6 @@ Instead of targeting specific resources, use the `--resource-flag` or `--exclude - - - In this example, use the following command to exclude _all_ unit tests from your dbt build process. Note that the `--exclude-resource-type` flag is only available in dbt version 1.8 and higher: diff --git a/website/docs/reference/node-selection/methods.md b/website/docs/reference/node-selection/methods.md index 38484494e4b..7587a9fd2b1 100644 --- a/website/docs/reference/node-selection/methods.md +++ b/website/docs/reference/node-selection/methods.md @@ -310,10 +310,6 @@ dbt list --select "+semantic_model:orders" # list your semantic model named "or ``` ### The "saved_query" method - -Supported in v1.7 or newer. - - The `saved_query` method selects [saved queries](/docs/build/saved-queries). @@ -322,8 +318,6 @@ dbt list --select "saved_query:*" # list all saved queries dbt list --select "+saved_query:orders_saved_query" # list your saved query named "orders_saved_query" and all upstream resources ``` - - ### The "unit_test" method diff --git a/website/docs/reference/resource-configs/access.md b/website/docs/reference/resource-configs/access.md index 0f67a454344..c73e09dd639 100644 --- a/website/docs/reference/resource-configs/access.md +++ b/website/docs/reference/resource-configs/access.md @@ -15,14 +15,6 @@ models: - - -Access modifiers may be applied to models one-by-one in YAML properties. In v1.5 and v1.6, you are unable to configure `access` for multiple models at once. Upgrade to v1.7 for additional configuration options. A group or subfolder contains models with varying access levels, so when you designate a model with `access: public`, make sure you intend for this behavior. - - - - - You can apply access modifiers in config files, including the `dbt_project.yml`, or to models one-by-one in `properties.yml`. Applying access configs to a subfolder modifies the default for all models in that subfolder, so make sure you intend for this behavior. When setting individual model access, a group or subfolder might contain a variety of access levels, so when you designate a model with `access: public` make sure you intend for this behavior. There are multiple approaches to configuring access: @@ -83,8 +75,6 @@ There are multiple approaches to configuring access: ``` - - After you define `access`, rerun a production job to apply the change. ## Definition diff --git a/website/docs/reference/resource-configs/bigquery-configs.md b/website/docs/reference/resource-configs/bigquery-configs.md index b943f114861..9dd39c936b6 100644 --- a/website/docs/reference/resource-configs/bigquery-configs.md +++ b/website/docs/reference/resource-configs/bigquery-configs.md @@ -710,8 +710,6 @@ models: Views with this configuration will be able to select from objects in `project_1.dataset_1` and `project_2.dataset_2`, even when they are located elsewhere and queried by users who do not otherwise have access to `project_1.dataset_1` and `project_2.dataset_2`. 
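A sketch of how that authorized-view grant can be declared in the model itself; the model name is illustrative, and the project and dataset names match the example above:

```sql
{{
  config(
    materialized = 'view',
    grant_access_to = [
      {'project': 'project_1', 'dataset': 'dataset_1'},
      {'project': 'project_2', 'dataset': 'dataset_2'},
    ]
  )
}}

select * from {{ ref('upstream_model') }}
```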
- - ## Materialized views The BigQuery adapter supports [materialized views](https://cloud.google.com/bigquery/docs/materialized-views-intro) @@ -894,10 +892,6 @@ As with most data platforms, there are limitations associated with materialized Find more information about materialized view limitations in Google's BigQuery [docs](https://cloud.google.com/bigquery/docs/materialized-views-intro#limitations). - - - - ## Python models The BigQuery adapter supports Python models with the following additional configuration parameters: @@ -914,4 +908,3 @@ By default, this is set to `True` to support the default `intermediate_format` o ### The `intermediate_format` parameter The `intermediate_format` parameter specifies which file format to use when writing records to a table. The default is `parquet`. - diff --git a/website/docs/reference/resource-configs/contract.md b/website/docs/reference/resource-configs/contract.md index 2f52fc26e1f..fb25076b0d9 100644 --- a/website/docs/reference/resource-configs/contract.md +++ b/website/docs/reference/resource-configs/contract.md @@ -16,14 +16,6 @@ This is to ensure that the people querying your model downstream—both inside a ## Data type aliasing - - -The `data_type` defined in your YAML file must match a data type your data platform recognizes. dbt does not do any type aliasing itself. If your data platform recognizes both `int` and `integer` as corresponding to the same type, then they will return a match. - - - - - dbt uses built-in type aliasing for the `data_type` defined in your YAML. For example, you can specify `string` in your contract, and on Postgres/Redshift, dbt will convert it to `text`. If dbt doesn't recognize the `data_type` name among its known aliases, it will pass it through as-is. This is enabled by default, but you can opt-out by setting `alias_types` to `false`. Example for disabling: @@ -42,7 +34,6 @@ models: ``` - ## Size, precision, and scale diff --git a/website/docs/reference/resource-configs/databricks-configs.md b/website/docs/reference/resource-configs/databricks-configs.md index 88014d0ac4d..1a7bd42b848 100644 --- a/website/docs/reference/resource-configs/databricks-configs.md +++ b/website/docs/reference/resource-configs/databricks-configs.md @@ -7,23 +7,7 @@ id: "databricks-configs" When materializing a model as `table`, you may include several optional configs that are specific to the dbt-databricks plugin, in addition to the standard [model configs](/reference/model-configs). - - - -| Option | Description | Required? | Model Support | Example | -|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------|---------------|--------------------------| -| file_format | The file format to use when creating tables (`parquet`, `delta`, `hudi`, `csv`, `json`, `text`, `jdbc`, `orc`, `hive` or `libsvm`). | Optional | SQL, Python | `delta` | -| location_root | The created table uses the specified directory to store its data. The table alias is appended to it. | Optional | SQL, Python | `/mnt/root` | -| partition_by | Partition the created table by the specified columns. A directory is created for each partition. | Optional | SQL, Python | `date_day` | -| liquid_clustered_by | Cluster the created table by the specified columns. 
Clustering method is based on [Delta's Liquid Clustering feature](https://docs.databricks.com/en/delta/clustering.html). Available since dbt-databricks 1.6.2. | Optional | SQL | `date_day` | -| clustered_by | Each partition in the created table will be split into a fixed number of buckets by the specified columns. | Optional | SQL, Python | `country_code` | -| buckets | The number of buckets to create while clustering. | Required if `clustered_by` is specified | SQL, Python | `8` | -| tblproperties | [Tblproperties](https://docs.databricks.com/en/sql/language-manual/sql-ref-syntax-ddl-tblproperties.html) to be set on the created table. | Optional | SQL | `{'this.is.my.key': 12}` | -| compression | Set the compression algorithm. | Optional | SQL, Python | `zstd` | - - - - + | Option | Description | Required? | Model Support | Example | @@ -492,8 +476,6 @@ insert into analytics.replace_where_incremental - - ## Selecting compute per model Beginning in version 1.7.2, you can assign which compute resource to use on a per-model basis. @@ -682,8 +664,6 @@ def model(dbt, session): If your default compute is a SQL Warehouse, you will need to specify an all purpose cluster `http_path` in this way. - - ## Persisting model descriptions Relation-level docs persistence is supported in dbt v0.17.0. For more @@ -895,9 +875,5 @@ One application of this feature is making `delta` tables compatible with `iceber ) }} ``` - - `tblproperties` can be specified for python models, but they will be applied via an `ALTER` statement after table creation. This is due to a limitation in PySpark. - - diff --git a/website/docs/reference/resource-configs/enabled.md b/website/docs/reference/resource-configs/enabled.md index febf1e50c88..b74d7250907 100644 --- a/website/docs/reference/resource-configs/enabled.md +++ b/website/docs/reference/resource-configs/enabled.md @@ -230,14 +230,6 @@ exposures: - - -Support for disabling semantic models has been added in dbt Core v1.7 - - - - - ```yaml @@ -259,20 +251,10 @@ semantic_models: - - - - -Support for disabling saved queries has been added in dbt Core v1.7. - - - - - ```yaml @@ -294,8 +276,6 @@ saved_queries: - - diff --git a/website/docs/reference/resource-configs/group.md b/website/docs/reference/resource-configs/group.md index 717d7de89f5..cd0ad2683f5 100644 --- a/website/docs/reference/resource-configs/group.md +++ b/website/docs/reference/resource-configs/group.md @@ -218,14 +218,6 @@ metrics: - - -Support for grouping semantic models has been added in dbt Core v1.7. - - - - - ```yaml @@ -247,20 +239,10 @@ semantic_models: - - - - -Support for grouping saved queries has been added in dbt Core v1.7. - - - - - ```yaml @@ -282,8 +264,6 @@ saved_queries: - - diff --git a/website/docs/reference/resource-configs/meta.md b/website/docs/reference/resource-configs/meta.md index 2bcccdd4141..53a4f77184e 100644 --- a/website/docs/reference/resource-configs/meta.md +++ b/website/docs/reference/resource-configs/meta.md @@ -179,14 +179,6 @@ exposures: - - -Support for grouping semantic models was added in dbt Core v1.7 - - - - - ```yml @@ -201,8 +193,6 @@ semantic_models: The `meta` config can also be defined under the `semantic-models` config block in `dbt_project.yml`. See [configs and properties](/reference/configs-and-properties) for details. - - @@ -249,14 +239,6 @@ metrics: - - -Support for saved queries has been added in dbt Core v1.7. 
- - - - - ```yml @@ -268,8 +250,6 @@ saved_queries: - - diff --git a/website/docs/reference/resource-configs/postgres-configs.md b/website/docs/reference/resource-configs/postgres-configs.md index 07cfc938f1c..f2bf90a93c0 100644 --- a/website/docs/reference/resource-configs/postgres-configs.md +++ b/website/docs/reference/resource-configs/postgres-configs.md @@ -185,20 +185,3 @@ It's worth noting that, unlike tables, dbt monitors this parameter for changes a This happens via a `DROP/CREATE` of the indexes, which can be thought of as an `ALTER` of the materialized view. Learn more about these parameters in Postgres's [docs](https://www.postgresql.org/docs/current/sql-creatematerializedview.html). - - - -### Limitations - -#### Changing materialization to and from "materialized_view" - -Swapping an already materialized model to a materialized view, and vice versa, is not supported. -The workaround is to manually drop the existing materialization in the data warehouse prior to calling `dbt run`. -Running with `--full-refresh` flag will not work to drop the existing table or view and create the materialized view (and vice versa). -This would only need to be done once as the existing object would then be a materialized view. - -For example,`my_model`, has already been materialized as a table in the underlying data platform via `dbt run`. -If the user changes the model's config to `materialized="materialized_view"`, they will get an error. -The solution is to execute `DROP TABLE my_model` on the data warehouse before trying the model again. - - diff --git a/website/docs/reference/resource-configs/redshift-configs.md b/website/docs/reference/resource-configs/redshift-configs.md index e7149ae484e..b033cd6267e 100644 --- a/website/docs/reference/resource-configs/redshift-configs.md +++ b/website/docs/reference/resource-configs/redshift-configs.md @@ -230,21 +230,6 @@ As with most data platforms, there are limitations associated with materialized Find more information about materialized view limitations in Redshift's [docs](https://docs.aws.amazon.com/redshift/latest/dg/materialized-view-create-sql-command.html#mv_CREATE_MATERIALIZED_VIEW-limitations). - - -#### Changing materialization from "materialized_view" to "table" or "view" - -Swapping a materialized view to a table or view is not supported. -You must manually drop the existing materialized view in the data warehouse before calling `dbt run`. -Normally, re-running with the `--full-refresh` flag would resolve this, but not in this case. -This would only need to be done once as the existing object would then be a materialized view. - -For example, assume that a materialized view, `my_mv.sql`, has already been materialized to the underlying data platform via `dbt run`. -If the user changes the model's config to `materialized="table"`, they will get an error. -The workaround is to execute `DROP MATERIALIZED VIEW my_mv CASCADE` on the data warehouse before trying the model again. 
- - - ## Unit test limitations diff --git a/website/docs/reference/resource-configs/snowflake-configs.md b/website/docs/reference/resource-configs/snowflake-configs.md index abb516d2258..b95b79241ba 100644 --- a/website/docs/reference/resource-configs/snowflake-configs.md +++ b/website/docs/reference/resource-configs/snowflake-configs.md @@ -337,33 +337,6 @@ For dbt limitations, these dbt features are not supported: - [Model contracts](/docs/collaborate/govern/model-contracts) - [Copy grants configuration](/reference/resource-configs/snowflake-configs#copying-grants) - - -#### Changing materialization to and from "dynamic_table" - -Version `1.6.x` does not support altering the materialization from a non-dynamic table be a dynamic table and vice versa. -Re-running with the `--full-refresh` does not resolve this either. -The workaround is manually dropping the existing model in the warehouse prior to calling `dbt run`. -This only needs to be done once for the conversion. - -For example, assume for the example model below, `my_model`, has already been materialized to the underlying data platform via `dbt run`. -If the model config is updated to `materialized="dynamic_table"`, dbt will return an error. -The workaround is to execute `DROP TABLE my_model` on the data warehouse before trying the model again. - - - -```yaml - -{{ config( - materialized="table" # or any model type (e.g. view, incremental) -) }} - -``` - - - - - ## Temporary tables Incremental table merges for Snowflake prefer to utilize a `view` rather than a `temporary table`. The reasoning is to avoid the database write step that a temporary table would initiate and save compile time. diff --git a/website/docs/reference/resource-properties/config.md b/website/docs/reference/resource-properties/config.md index 8190c7dd8ca..1e1867dda04 100644 --- a/website/docs/reference/resource-properties/config.md +++ b/website/docs/reference/resource-properties/config.md @@ -170,14 +170,6 @@ exposures: - - -Support for the `config` property on `semantic_models` was added in dbt Core v1.7 - - - - - ```yml @@ -193,20 +185,10 @@ semantic_models: - - - - -Support for the `config` property on `saved queries` was added in dbt Core v1.7. - - - - - ```yml @@ -226,8 +208,6 @@ saved-queries: - - diff --git a/website/docs/reference/resource-properties/freshness.md b/website/docs/reference/resource-properties/freshness.md index 03037e7b681..d68dee4fade 100644 --- a/website/docs/reference/resource-properties/freshness.md +++ b/website/docs/reference/resource-properties/freshness.md @@ -37,8 +37,6 @@ A freshness block is used to define the acceptable amount of time between the mo In the `freshness` block, one or both of `warn_after` and `error_after` can be provided. If neither is provided, then dbt will not calculate freshness snapshots for the tables in this source. - - In most cases, the `loaded_at_field` is required. Some adapters support calculating source freshness from the warehouse metadata tables and can exclude the `loaded_at_field`. If a source has a `freshness:` block, dbt will attempt to calculate freshness for that source: @@ -62,29 +60,9 @@ To exclude a source from freshness calculations, you have two options: - Don't add a `freshness:` block. - Explicitly set `freshness: null`. - - - - -Additionally, the `loaded_at_field` is required to calculate freshness for a table. If a `loaded_at_field` is not provided, then dbt will not calculate freshness for the table. 
- -Freshness blocks are applied hierarchically: -- A `freshness` and `loaded_at_field` property added to a source will be applied to all tables defined in that source -- A `freshness` and `loaded_at_field` property added to a source _table_ will override any properties applied to the source. - -This is useful when all of the tables in a source have the same `loaded_at_field`, as is often the case. - - ## loaded_at_field - -(Optional on adapters that support pulling freshness from warehouse metadata tables, required otherwise.) - - - -(Required) - - +Optional on adapters that support pulling freshness from warehouse metadata tables, required otherwise.

A column name (or expression) that returns a timestamp indicating freshness. If using a date field, you may have to cast it to a timestamp: diff --git a/website/src/theme/DocRoot/Layout/Main/index.js b/website/src/theme/DocRoot/Layout/Main/index.js index a8c9d449b82..154c3cbfab6 100644 --- a/website/src/theme/DocRoot/Layout/Main/index.js +++ b/website/src/theme/DocRoot/Layout/Main/index.js @@ -89,7 +89,7 @@ export default function DocRootLayoutMain({ if (new Date() > new Date(EOLDate)) { setEOLData({ showEOLBanner: true, - EOLBannerText: `This version of dbt Core is no longer supported. There will be no more patches or security fixes. For improved performance, security, and features, upgrade to the latest stable version.`, + EOLBannerText: `This version of dbt Core is no longer supported. There will be no more patches or security fixes. For improved performance, security, and features, upgrade to the latest stable version. Some dbt Cloud customers might have an extended critical support window. `, }); } else if (new Date() > threeMonths) { setEOLData({ From 736791d44d4fa2ead8da78b2cbba5e2080cc45d9 Mon Sep 17 00:00:00 2001 From: Matt Shaver <60105315+matthewshaver@users.noreply.github.com> Date: Tue, 29 Oct 2024 14:26:34 -0400 Subject: [PATCH 2/3] Update website/docs/docs/build/packages.md --- website/docs/docs/build/packages.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/docs/build/packages.md b/website/docs/docs/build/packages.md index f0cd0095a11..49cd7e00b1c 100644 --- a/website/docs/docs/build/packages.md +++ b/website/docs/docs/build/packages.md @@ -83,7 +83,7 @@ packages: version: [">=0.7.0", "<0.8.0"] ``` -Beginning in v1.7, `dbt deps` "pins" each package by default. See ["Pinning packages"](#pinning-packages) for details. +`dbt deps` "pins" each package by default. See ["Pinning packages"](#pinning-packages) for details. Where possible, we recommend installing packages via dbt Hub, since this allows dbt to handle duplicate dependencies. This is helpful in situations such as: * Your project uses both the dbt-utils and Snowplow packages, and the Snowplow package _also_ uses the dbt-utils package. From b68c42e384a991d2197b117576d29492ed12cdee Mon Sep 17 00:00:00 2001 From: Matt Shaver <60105315+matthewshaver@users.noreply.github.com> Date: Tue, 29 Oct 2024 14:35:28 -0400 Subject: [PATCH 3/3] 1.6 updates --- website/docs/docs/build/environment-variables.md | 2 +- website/docs/docs/collaborate/govern/project-dependencies.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/docs/build/environment-variables.md b/website/docs/docs/build/environment-variables.md index c26425401a7..b87786ac596 100644 --- a/website/docs/docs/build/environment-variables.md +++ b/website/docs/docs/build/environment-variables.md @@ -102,7 +102,7 @@ dbt Cloud has a number of pre-defined variables built in. Variables are set auto The following environment variable is set automatically for the dbt Cloud IDE: - `DBT_CLOUD_GIT_BRANCH` — Provides the development Git branch name in the [dbt Cloud IDE](/docs/cloud/dbt-cloud-ide/develop-in-the-cloud). - - Available in dbt v 1.6 and later. + - Available in dbt v1.6 and later. - The variable changes when the branch is changed. - Doesn't require restarting the IDE after a branch change. - Currently not available in the [dbt Cloud CLI](/docs/cloud/cloud-cli-installation). 
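As a sketch of how the branch variable above might be used, a model can read it through `env_var()` with a fallback for contexts where it isn't set; the model name, fallback value, and date filter below are illustrative:

```sql
select * from {{ ref('stg_orders') }}
-- trim the data scanned while developing on a feature branch;
-- 'main' is an assumed fallback for environments without the variable
{% if env_var('DBT_CLOUD_GIT_BRANCH', 'main') != 'main' %}
where order_date >= dateadd(month, -3, current_date)
{% endif %}
```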
diff --git a/website/docs/docs/collaborate/govern/project-dependencies.md b/website/docs/docs/collaborate/govern/project-dependencies.md
index c054d1b27b7..7813e25efcb 100644
--- a/website/docs/docs/collaborate/govern/project-dependencies.md
+++ b/website/docs/docs/collaborate/govern/project-dependencies.md
@@ -18,7 +18,7 @@ This year, dbt Labs is introducing an expanded notion of `dependencies` across m
## Prerequisites

- Available in [dbt Cloud Enterprise](https://www.getdbt.com/pricing). If you have an Enterprise account, you can unlock these features by designating a [public model](/docs/collaborate/govern/model-access) and adding a [cross-project ref](#how-to-write-cross-project-ref).
-- Use a supported version of dbt (v1.6, v1.7, or go versionless with "[Versionless](/docs/dbt-versions/upgrade-dbt-version-in-cloud#versionless)") for both the upstream ("producer") project and the downstream ("consumer") project.
+- Use a supported version of dbt (v1.6 or newer, or go versionless with "[Versionless](/docs/dbt-versions/upgrade-dbt-version-in-cloud#versionless)") for both the upstream ("producer") project and the downstream ("consumer") project.
- Define models in an upstream ("producer") project that are configured with [`access: public`](/reference/resource-configs/access). You need at least one successful job run after defining their `access`.
- Define a deployment environment in the upstream ("producer") project [that is set to be your Production environment](/docs/deploy/deploy-environments#set-as-production-environment), and ensure it has at least one successful job run in that environment.
- If the upstream project has a Staging environment, run a job in that Staging environment to ensure the downstream cross-project ref resolves.
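To make the producer/consumer flow above concrete, here is a minimal sketch of a cross-project dependency; the project and model names (`jaffle_finance`, `monthly_revenue`) are illustrative:

```yml
# dependencies.yml in the downstream ("consumer") project
projects:
  - name: jaffle_finance
```

A model in the consumer project can then reference the producer's public model with a two-argument ref, for example `select * from {{ ref('jaffle_finance', 'monthly_revenue') }}`.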