diff --git a/.github/workflows/tsdb-refresh-gucs-list.yaml b/.github/workflows/tsdb-refresh-gucs-list.yaml
new file mode 100644
index 0000000000..a87d88e76a
--- /dev/null
+++ b/.github/workflows/tsdb-refresh-gucs-list.yaml
@@ -0,0 +1,37 @@
+name: "TimescaleDB: Update GUCs list"
+
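+# This workflow is triggered via workflow_call only. A hypothetical caller
+# (the tag value is illustrative):
+#
+#   jobs:
+#     refresh-gucs:
+#       uses: ./.github/workflows/tsdb-refresh-gucs-list.yaml
+#       with:
+#         tag: "2.19.0"
+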
+on:
+  workflow_call:
+    inputs:
+      tag:
+        description: 'Tag to refresh the list from'
+        required: true
+        type: string
+
+permissions:
+  contents: write
+  pull-requests: write
+
+jobs:
+  update-gucs-list:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: '3.13'
+          cache: 'pip' # caching pip dependencies
+
+      - name: Update list of GUCs
+        run: |
+          pip install -r ./.helper-scripts/timescaledb/requirements.txt
+          python ./.helper-scripts/timescaledb/generate_guc_overview.py "${{ inputs.tag }}" ./_partials/_timescaledb-gucs.md
+
+      - name: Create Pull Request
+        uses: peter-evans/create-pull-request@v7
+        with:
+          token: ${{ secrets.ORG_AUTOMATION_TOKEN }}
+          add-paths: _partials/_timescaledb-gucs.md
+          delete-branch: true
+          title: "Updated list of GUCs from TimescaleDB ${{ inputs.tag }}"
diff --git a/.helper-scripts/timescaledb/generate_guc_overview.py b/.helper-scripts/timescaledb/generate_guc_overview.py
new file mode 100644
index 0000000000..e42a4c3f69
--- /dev/null
+++ b/.helper-scripts/timescaledb/generate_guc_overview.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Generate Overview page of available GUCs in TimescaleDB with descriptions
+#
+# Args:
+#   tag: tag to pull the guc.c from
+#   destination: file to write the generated Markdown table to
+#
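+# Example invocation (the tag value is illustrative):
+#   python generate_guc_overview.py 2.19.0 ./_partials/_timescaledb-gucs.md
+#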
+
+import argparse
+import logging
+import re
+import sys
+
+import requests
+
+logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO)
+
+parser = argparse.ArgumentParser()
+parser.add_argument('tag', type=str, help='tag name to pull guc.c from')
+parser.add_argument('destination', type=str, help='file to write the output to')
+args = parser.parse_args()
+
+TYPES = {
+    "DefineCustomBoolVariable": "BOOLEAN",
+    "DefineCustomIntVariable": "INTEGER",
+    "DefineCustomEnumVariable": "ENUM",
+    "DefineCustomStringVariable": "STRING",
+}
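+
+# For illustration, an abridged (hypothetical) example of the guc.c shape the
+# MAKE_EXTOPTION regex in prepare() matches on:
+#
+#   DefineCustomBoolVariable(MAKE_EXTOPTION("enable_optimizations"),
+#                            /* short_desc= */ gettext_noop("..."),
+#                            /* long_desc= */ gettext_noop("..."),
+#                            ...);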
+
+# List of GUCs to exclude from the docs
+EXCLUDE = []
+
+"""
+Fetch the guc.c content from GitHub
+@param url: str
+@return str
+"""
+def get_content(url: str) -> str:
+ resp = requests.get(url=url)
+ if resp.status_code != 200:
+ logging.error("can not fetch: %s" % url)
+ exit(10)
+ return resp.text
+
+"""
+Unwrap parsed GUCs into a map with GUC name as key and the value with the
+extracted values from the GUC:
+ /* name= */,
+ /* short_desc= */,
+ /* long_desc= */,
+ /* valueAddr= */,
+ /* Value= */,
+ /* context= */,
+ /* flags= */,
+ /* check_hook= */,
+ /* assign_hook= */,
+ /* show_hook= */
+@param gucs: list
+@param guc_type: str
+@return dict
+"""
+def unwrap(gucs: list, guc_type: str) -> dict:
+ map = {}
+
+ for guc in gucs:
+ # sanitize data
+ it = [re.sub(r"[\n\t]*", "", v).strip() for v in guc.split(",")]
+
+ # sanitize elements
+ name = re.sub(r"[\"\(\)]*", "", it[0])
+ short_desc = it[1].strip("\"")
+ long_desc = it[1] if it[2].lower() == "null" else re.sub(r"[\"\"]*", "", it[2].strip("\""))
+ value = it[4]
+
+ # TODO: clean up /* Value= */ from strings
+ #
+
+ # Exclude GUCs (if specified)
+ if name not in EXCLUDE:
+ map[name] = {
+ "name": name,
+ "short_desc": short_desc,
+ "long_desc": long_desc,
+ "value": value,
+ "type": guc_type,
+ "scopes": [], # assigned later during scope discovery
+ }
+
+ logging.info("registered %d GUCs of type: %s" % (len(map), guc_type))
+ return map
+
+"""
+Parse GUCs and prepare them for rendering
+@param content: str
+@return dict
+"""
+def prepare(content: str) -> dict:
+ map = {}
+
+ # Find all GUCs based on patterns and prepare them in a dict
+ for pattern, val in TYPES.items():
+ map.update(unwrap(re.findall(r"%s\(MAKE_EXTOPTION(.*?)\);" % pattern, content, re.DOTALL), val))
+
+ # TODO: find scopes
+ # https://github.com/timescale/timescaledb/blob/2.19.x/src/guc.c#L797
+
+
+ # Return dict with alphabetically sorted keys
+ return {i: map[i] for i in sorted(map.keys())}
+
+"""
+Render the GUCs to file
+"""
+def render(gucs: dict, filename: str):
+ with open(filename, "w") as f:
+ f.write("| Name | Type | Short Description | Short Description | Value |\n")
+ f.write("| --- | --- | --- | --- | --- |\n")
+ for guc in gucs.values():
+ f.write("| `%s` | `%s` | %s | %s | `%s` |\n" % (
+ guc["name"], guc["type"], guc["short_desc"], guc["long_desc"], guc["value"]
+ ))
+ logging.info("rendering completed to %s" % filename)
+
+"""
+Main
+"""
+if __name__ == "__main__":
+ content = get_content("https://raw.githubusercontent.com/timescale/timescaledb/refs/tags/%s/src/guc.c" % args.tag)
+ logging.info("fetched guc.c file for version: %s" % args.tag)
+ gucs = prepare(content)
+ render(gucs, args.destination)
+
+# print(gucs)
diff --git a/.helper-scripts/timescaledb/requirements.txt b/.helper-scripts/timescaledb/requirements.txt
new file mode 100644
index 0000000000..d80d9fc2a3
--- /dev/null
+++ b/.helper-scripts/timescaledb/requirements.txt
@@ -0,0 +1 @@
+requests==2.32.3
diff --git a/_partials/_timescaledb-config.md b/_partials/_timescaledb-config.md
new file mode 100644
index 0000000000..c54887fbbc
--- /dev/null
+++ b/_partials/_timescaledb-config.md
@@ -0,0 +1,70 @@
+import ConfigCloudSelf from "versionContent/_partials/_cloud_self_configuration.mdx";
+
+Just as you can tune settings in $PG, $TIMESCALE_DB provides a number of configuration
+settings that may be useful to your specific installation and performance needs. You can
+set them in the `postgresql.conf` file or as command-line parameters
+when starting $PG.
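+
+You can also persist a setting from a client session with `ALTER SYSTEM`. The value
+below is illustrative, and some settings only take effect after a configuration reload
+or a server restart:
+
+```sql
+-- Persist the setting in postgresql.auto.conf
+ALTER SYSTEM SET timescaledb.max_background_workers = 16;
+-- Reload the configuration; postmaster-level settings still need a restart
+SELECT pg_reload_conf();
+```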
+
+## Query Planning and Execution
+
+### `timescaledb.enable_chunkwise_aggregation (bool)`
+If enabled, aggregations are converted into partial aggregations during query
+planning. The first part of the aggregation is executed on a per-chunk basis.
+Then, these partial results are combined and finalized. Splitting aggregations
+decreases the size of the created hash tables and increases data locality, which
+speeds up queries.
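+
+For example, to turn chunk-wise aggregation off for the current session only:
+
+```sql
+SET timescaledb.enable_chunkwise_aggregation = 'off';
+```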
+
+### `timescaledb.vectorized_aggregation (bool)`
+Enables or disables the vectorized optimizations in the query executor. For
+example, the `sum()` aggregation function on compressed chunks can be optimized
+in this way.
+
+### `timescaledb.enable_merge_on_cagg_refresh (bool)`
+
+Set to `ON` to dramatically decrease the amount of data written on a continuous aggregate
+in the presence of a small number of changes, reduce the I/O cost of refreshing a
+[continuous aggregate][continuous-aggregates], and generate less write-ahead log (WAL). This only works for continuous aggregates that don't have compression enabled.
+
+## Policies
+
+### `timescaledb.max_background_workers (int)`
+
+Max background worker processes allocated to $TIMESCALE_DB. Set to at least 1 +
+the number of databases loaded with the $TIMESCALE_DB extension in a $PG
+instance. The default value is 16.
+
+<ConfigCloudSelf />
+
+## Hypercore features
+
+### `timescaledb.default_hypercore_use_access_method (bool)`
+
+The default value for `hypercore_use_access_method` for functions that have this parameter. This setting has `user` context, meaning that any user can set it for their session. The default value is `false`.
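+
+For example, because this setting has `user` context, any user can change it for their
+own session:
+
+```sql
+SET timescaledb.default_hypercore_use_access_method = true;
+```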
+
+## Administration
+
+### `timescaledb.restoring (bool)`
+
+Set $TIMESCALE_DB in restoring mode. It is disabled by default.
+
+### `timescaledb.license (string)`
+
+Change access to features based on the $TIMESCALE_DB license in use. For example,
+setting `timescaledb.license` to `apache` limits $TIMESCALE_DB to features that
+are implemented under the Apache 2 license. The default value is `timescale`,
+which allows access to all features.
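+
+To check which license your installation is running under:
+
+```sql
+SHOW timescaledb.license;
+```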
+
+### `timescaledb.telemetry_level (enum)`
+
+The level of telemetry to send. Set to `off` or `basic`. Defaults to `basic`.
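+
+For example, to turn telemetry off and reload the configuration:
+
+```sql
+ALTER SYSTEM SET timescaledb.telemetry_level = 'off';
+SELECT pg_reload_conf();
+```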
+
+### `timescaledb.last_tuned (string)`
+
+Records the last time `timescaledb-tune` ran.
+
+### `timescaledb.last_tuned_version (string)`
+
+The version of `timescaledb-tune` used the last time it ran.
+
+[continuous-aggregates]: /use-timescale/:currentVersion:/continuous-aggregates/
diff --git a/_partials/_timescaledb-gucs.md b/_partials/_timescaledb-gucs.md
new file mode 100644
index 0000000000..8850868b93
--- /dev/null
+++ b/_partials/_timescaledb-gucs.md
@@ -0,0 +1,72 @@
+
+IAIN: Can we update the structure to follow the same format as the other API references, please? For example:
+
+| Name | Type | Default | Required | Description |
+| --- | --- | --- | --- | --- |
+| `auto_sparse_indexes` | `BOOLEAN` |`&ts_guc_auto_sparse_indexes` (Don't think this is resolving correctly) | ✖ | Set to `true` to create [sparse indexes][sparse-indexes] on the $HYPERTABLE columns used as index keys when a chunk is converted to the columnstore. You must enable `auto_sparse_indexes` before the chunk is converted. |
+
+
+IAIN: current version below
+
+| Name | Type | Short Description | Long Description | Value |
+| --- | --- | --- | --- | --- |
+| `auto_sparse_indexes` | `BOOLEAN` | Create sparse indexes on compressed chunks | The hypertable columns that are used as index keys will have suitable sparse indexes when compressed. Must be set at the moment of chunk compression | `&ts_guc_auto_sparse_indexes` |
+| `bgw_log_level` | `ENUM` | Log level for the background worker subsystem | Log level for the scheduler and workers of the background worker subsystem. Requires configuration reload to change. | `WARNING` |
+| `compress_truncate_behaviour` | `ENUM` | Define behaviour of truncate after compression | Defines how truncate behaves at the end of compression. 'truncate_only' forces truncation. 'truncate_disabled' deletes rows instead of truncate. 'truncate_or_delete' allows falling back to deletion. | `COMPRESS_TRUNCATE_ONLY` |
+| `compression_batch_size_limit` | `INTEGER` | The max number of tuples that can be batched together during compression | Setting this option to a number between 1 and 999 will force compression to limit the size of compressed batches to that amount of uncompressed tuples. Setting this to 0 defaults to the max batch size of 1000. | `1000` |
+| `default_hypercore_use_access_method` | `BOOLEAN` | Enable to always use Hypercore TAM when compressing. | Sets the global default for using Hypercore TAM when compressing chunks. | `false` |
+| `enable_bool_compression` | `BOOLEAN` | Enable bool compression functionality | Enable bool compression | `true` |
+| `enable_bulk_decompression` | `BOOLEAN` | Enable decompression of the entire compressed batches | Increases throughput of decompression | `&ts_guc_enable_bulk_decompression` |
+| `enable_cagg_reorder_groupby` | `BOOLEAN` | Enable group by reordering | Enable group by clause reordering for continuous aggregates | `true` |
+| `enable_cagg_sort_pushdown` | `BOOLEAN` | Enable sort pushdown for continuous aggregates | Enable pushdown of ORDER BY clause for continuous aggregates | `true` |
+| `enable_cagg_watermark_constify` | `BOOLEAN` | Enable cagg watermark constify | Enable constifying cagg watermark for real-time caggs | `true` |
+| `enable_cagg_window_functions` | `BOOLEAN` | Enable window functions in continuous aggregates | Allow window functions in continuous aggregate views | `false` |
+| `enable_chunk_append` | `BOOLEAN` | Enable chunk append node | Enable using chunk append node | `true` |
+| `enable_chunk_skipping` | `BOOLEAN` | Enable chunk skipping functionality | Enable using chunk column stats to filter chunks based on column filters | `false` |
+| `enable_chunkwise_aggregation` | `BOOLEAN` | Enable chunk-wise aggregation | Enable the pushdown of aggregations to the chunk level | `true` |
+| `enable_columnarscan` | `BOOLEAN` | Enable columnar-optimized scans for supported access methods | A columnar scan replaces sequence scans for columnar-oriented storage and enables storage-specific optimizations like vectorized filters. Disabling columnar scan will make PostgreSQL fall back to regular sequence scans. | `true` |
+| `enable_compressed_direct_batch_delete` | `BOOLEAN` | Enable direct deletion of compressed batches | Enable direct batch deletion in compressed chunks | `true` |
+| `enable_compressed_skipscan` | `BOOLEAN` | Enable SkipScan for compressed chunks | Enable SkipScan for distinct inputs over compressed chunks | `true` |
+| `enable_compression_indexscan` | `BOOLEAN` | Enable compression to take indexscan path | Enable indexscan during compression | `&ts_guc_enable_compression_indexscan` |
+| `enable_compression_ratio_warnings` | `BOOLEAN` | Enable warnings for poor compression ratio | Enable warnings for poor compression ratio | `true` |
+| `enable_compression_wal_markers` | `BOOLEAN` | Enable WAL markers for compression ops | Enable the generation of markers in the WAL stream which mark the start and end of compression operations | `true` |
+| `enable_compressor_batch_limit` | `BOOLEAN` | Enable compressor batch limit | Enable compressor batch limit for compressors which can go over the allocation limit (1 GB). This feature will limit those compressors by reducing the size of the batch and thus avoid hitting the limit. | `false` |
+| `enable_constraint_aware_append` | `BOOLEAN` | Enable constraint-aware append scans | Enable constraint exclusion at execution time | `true` |
+| `enable_constraint_exclusion` | `BOOLEAN` | Enable constraint exclusion | Enable planner constraint exclusion | `true` |
+| `enable_custom_hashagg` | `BOOLEAN` | Enable custom hash aggregation | Enable creating custom hash aggregation plans | `false` |
+| `enable_decompression_sorted_merge` | `BOOLEAN` | Enable compressed batches heap merge | Enable the merge of compressed batches to preserve the compression order by | `true` |
+| `enable_delete_after_compression` | `BOOLEAN` | Delete all rows after compression instead of truncate | Delete all rows after compression instead of truncate | `false` |
+| `enable_deprecation_warnings` | `BOOLEAN` | Enable warnings when using deprecated functionality | Enable warnings when using deprecated functionality | `true` |
+| `enable_dml_decompression` | `BOOLEAN` | Enable DML decompression | Enable DML decompression when modifying compressed hypertable | `true` |
+| `enable_dml_decompression_tuple_filtering` | `BOOLEAN` | Enable DML decompression tuple filtering | Recheck tuples during DML decompression to only decompress batches with matching tuples | `true` |
+| `enable_event_triggers` | `BOOLEAN` | Enable event triggers for chunks creation | Enable event triggers for chunks creation | `false` |
+| `enable_exclusive_locking_recompression` | `BOOLEAN` | Enable exclusive locking recompression | Enable getting exclusive lock on chunk during segmentwise recompression | `false` |
+| `enable_foreign_key_propagation` | `BOOLEAN` | Enable foreign key propagation | Adjust foreign key lookup queries to target whole hypertable | `true` |
+| `enable_job_execution_logging` | `BOOLEAN` | Enable job execution logging | Retain job run status in logging table | `false` |
+| `enable_merge_on_cagg_refresh` | `BOOLEAN` | Enable MERGE statement on cagg refresh | Enable MERGE statement on cagg refresh | `false` |
+| `enable_now_constify` | `BOOLEAN` | Enable now() constify | Enable constifying now() in query constraints | `true` |
+| `enable_null_compression` | `BOOLEAN` | Debug only flag to enable NULL compression | Enable null compression | `true` |
+| `enable_optimizations` | `BOOLEAN` | Enable TimescaleDB query optimizations | Enable TimescaleDB query optimizations | `true` |
+| `enable_ordered_append` | `BOOLEAN` | Enable ordered append scans | Enable ordered append optimization for queries that are ordered by the time dimension | `true` |
+| `enable_parallel_chunk_append` | `BOOLEAN` | Enable parallel chunk append node | Enable using parallel aware chunk append node | `true` |
+| `enable_qual_propagation` | `BOOLEAN` | Enable qualifier propagation | Enable propagation of qualifiers in JOINs | `true` |
+| `enable_runtime_exclusion` | `BOOLEAN` | Enable runtime chunk exclusion | Enable runtime chunk exclusion in ChunkAppend node | `true` |
+| `enable_segmentwise_recompression` | `BOOLEAN` | Enable segmentwise recompression functionality | Enable segmentwise recompression | `true` |
+| `enable_skipscan` | `BOOLEAN` | Enable SkipScan | Enable SkipScan for DISTINCT queries | `true` |
+| `enable_skipscan_for_distinct_aggregates` | `BOOLEAN` | Enable SkipScan for DISTINCT aggregates | Enable SkipScan for DISTINCT aggregates | `true` |
+| `enable_sparse_index_bloom` | `BOOLEAN` | Enable creation of the bloom1 sparse index on compressed chunks | This sparse index speeds up the equality queries on compressed columns | `&ts_guc_enable_sparse_index_bloom` |
+| `enable_tiered_reads` | `BOOLEAN` | Enable tiered data reads | Enable reading of tiered data by including a foreign table representing the data in the object storage into the query plan | `true` |
+| `enable_transparent_decompression` | `ENUM` | Enable transparent decompression | Enable transparent decompression when querying hypertable | `1` |
+| `enable_tss_callbacks` | `BOOLEAN` | Enable ts_stat_statements callbacks | Enable ts_stat_statements callbacks | `true` |
+| `enable_vectorized_aggregation` | `BOOLEAN` | Enable vectorized aggregation | Enable vectorized aggregation for compressed data | `true` |
+| `hypercore_copy_to_behavior` | `ENUM` | The behavior of COPY TO on a hypercore table | Set to 'all_data' to return both compressed and uncompressed data via the Hypercore table when using COPY TO. Set to 'no_compressed_data' to skip compressed data. | `HYPERCORE_COPY_NO_COMPRESSED_DATA` |
+| `hypercore_indexam_whitelist` | `STRING` | Whitelist for index access methods supported by hypercore. | List of index access method names supported by hypercore. | `btree` |
+| `materializations_per_refresh_window` | `INTEGER` | Max number of materializations per cagg refresh window | The maximal number of individual refreshes per cagg refresh. If more refreshes need to be performed | `&ts_guc_cagg_max_individual_materializations` |
+| `max_cached_chunks_per_hypertable` | `INTEGER` | Maximum cached chunks | Maximum number of chunks stored in the cache | `1024` |
+| `max_open_chunks_per_insert` | `INTEGER` | Maximum open chunks per insert | Maximum number of open chunk tables per insert | `1024` |
+| `max_tuples_decompressed_per_dml_transaction` | `INTEGER` | The max number of tuples that can be decompressed during an INSERT, UPDATE, or DELETE | If the number of tuples exceeds this value, an error is thrown and the transaction is rolled back | `100000` |
+| `restoring` | `BOOLEAN` | Enable restoring mode for timescaledb | In restoring mode all timescaledb internal hooks are disabled. This mode is required for restoring logical dumps of databases with timescaledb. | `false` |
+| `telemetry_level` | `ENUM` | Telemetry settings level | Level used to determine which telemetry to send | `TELEMETRY_DEFAULT` |
+
+
+[sparse-indexes]: /use-timescale/:currentVersion:/schema-management/indexing/#best-practices-for-indexing
diff --git a/api/configuration.md b/api/configuration.md
deleted file mode 100644
index 4ba827c4b1..0000000000
--- a/api/configuration.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: Configuration
-excerpt: Use the default PostgreSQL server configuration settings for your Tiger Cloud service, or customize them as needed
-keywords: [configure]
-products: [self_hosted]
----
-
-# Configuration
-
-By default, TimescaleDB uses the default PostgreSQL server configuration
-settings. You can also change both PostgreSQL and TimescaleDB configuration
-settings yourself. For a list of settings, see the
-[configuration how-to guide][configuration-how-to].
-
-[configuration-how-to]: /self-hosted/:currentVersion:/configuration/about-configuration/
diff --git a/api/configuration/gucs.md b/api/configuration/gucs.md
new file mode 100644
index 0000000000..0c983abdb4
--- /dev/null
+++ b/api/configuration/gucs.md
@@ -0,0 +1,13 @@
+---
+title: Grand Unified Configuration (GUC) parameters
+excerpt: Optimize the behavior of TimescaleDB using Grand Unified Configuration (GUC) parameters
+keywords: [GUC, Configuration]
+---
+
+import TsdbGucsList from "versionContent/_partials/_timescaledb-gucs.mdx";
+
+# Grand Unified Configuration (GUC) parameters
+
+You use the following Grand Unified Configuration (GUC) parameters to optimize the behavior of your $SERVICE_LONG.
+
+
+<TsdbGucsList />
diff --git a/api/configuration/index.md b/api/configuration/index.md
new file mode 100644
index 0000000000..c69236f058
--- /dev/null
+++ b/api/configuration/index.md
@@ -0,0 +1,17 @@
+---
+title: Service configuration
+excerpt: Use the default PostgreSQL server configuration settings for your Tiger Cloud service, or customize them as needed
+keywords: [configure]
+products: [self_hosted, cloud]
+---
+
+# $SERVICE_LONG configuration
+
+A $SERVICE_LONG uses the default $PG server configuration settings. You can optimize your $SERVICE_SHORT configuration
+using the following $TIMESCALE_DB and Grand Unified Configuration (GUC) parameters:
+
+* [$TIGER_POSTGRES configuration and tuning][tigerpostgres-config]
+* [Grand Unified Configuration (GUC) parameters][gucs]
+
+[tigerpostgres-config]: /api/:currentVersion:/configuration/tiger-postgres/
+[gucs]: /api/:currentVersion:/configuration/gucs/
diff --git a/api/configuration/tiger-postgres.md b/api/configuration/tiger-postgres.md
new file mode 100644
index 0000000000..d57a61dfb4
--- /dev/null
+++ b/api/configuration/tiger-postgres.md
@@ -0,0 +1,20 @@
+---
+title: Tiger Postgres configuration and tuning
+excerpt: Configure the Tiger Postgres settings related to query planning and execution, policies, Hypercore, and administration
+products: [cloud]
+keywords: [configuration, settings]
+tags: [tune]
+---
+
+import TimescaleDBConfig from "versionContent/_partials/_timescaledb-config.mdx";
+import MultiNodeDeprecation from "versionContent/_partials/_multi-node-deprecation.mdx";
+
+# $TIGER_POSTGRES configuration and tuning
+
+Just as you can tune settings in $PG, $TIGER_POSTGRES provides a number of configuration
+settings that may be useful to your specific installation and performance needs.
+
+<TimescaleDBConfig />
+
diff --git a/api/page-index/page-index.js b/api/page-index/page-index.js
index 6dac56b337..8017a970dd 100644
--- a/api/page-index/page-index.js
+++ b/api/page-index/page-index.js
@@ -555,8 +555,20 @@ module.exports = [
],
},
{
+ title: "Service configuration",
href: "configuration",
- excerpt: "Configure PostgreSQL and TimescaleDB",
+ children: [
+ {
+ title: "Tiger Postgres configuration",
+ href: "tiger-postgres",
+ excerpt: "Configure PostgreSQL and TimescaleDB",
+ },
+ {
+ title: "Grand Unified Configuration (GUC) parameters",
+ href: "gucs",
+ excerpt: "Change the behaviour of TimescaleDB using GUCs",
+ },
+ ],
},
{
title: "Administration Functions",
diff --git a/self-hosted/configuration/timescaledb-config.md b/self-hosted/configuration/timescaledb-config.md
index c357981adf..76e0392826 100644
--- a/self-hosted/configuration/timescaledb-config.md
+++ b/self-hosted/configuration/timescaledb-config.md
@@ -6,51 +6,17 @@ keywords: [configuration, settings]
tags: [tune]
---
+import TimescaleDBConfig from "versionContent/_partials/_timescaledb-config.mdx";
import MultiNodeDeprecation from "versionContent/_partials/_multi-node-deprecation.mdx";
-import ConfigCloudSelf from "versionContent/_partials/_cloud_self_configuration.mdx";
-# TimescaleDB configuration and tuning
+# $TIMESCALE_DB configuration and tuning
-Just as you can tune settings in PostgreSQL, TimescaleDB provides a number of configuration
+Just as you can tune settings in $PG, $TIMESCALE_DB provides a number of configuration
settings that may be useful to your specific installation and performance needs. These can
also be set within the `postgresql.conf` file or as command-line parameters
-when starting PostgreSQL.
+when starting $PG.
-## Query Planning and Execution
-
-### `timescaledb.enable_chunkwise_aggregation (bool)`
-If enabled, aggregations are converted into partial aggregations during query
-planning. The first part of the aggregation is executed on a per-chunk basis.
-Then, these partial results are combined and finalized. Splitting aggregations
-decreases the size of the created hash tables and increases data locality, which
-speeds up queries.
-
-### `timescaledb.vectorized_aggregation (bool)`
-Enables or disables the vectorized optimizations in the query executor. For
-example, the `sum()` aggregation function on compressed chunks can be optimized
-in this way.
-
-### `timescaledb.enable_merge_on_cagg_refresh (bool)`
-
-Set to `ON` to dramatically decrease the amount of data written on a continuous aggregate
-in the presence of a small number of changes, reduce the i/o cost of refreshing a
-[continuous aggregate][continuous-aggregates], and generate fewer Write-Ahead Logs (WAL). Only works for continuous aggregates that don't have compression enabled.
-
-## Policies
-
-### `timescaledb.max_background_workers (int)`
-
-Max background worker processes allocated to TimescaleDB. Set to at least 1 +
-the number of databases loaded with a TimescaleDB extension in a PostgreSQL
-instance. Default value is 16.
-
-
-
-## Hypercore features
-
-### `timescaledb.default_hypercore_use_access_method (bool)`
-
-The default value for `hypercore_use_access_method` for functions that have this parameter. This function is in `user` context, meaning that any user can set it for the session. The default value is `false`.
+<TimescaleDBConfig />
+
## Distributed hypertables
@@ -105,37 +71,10 @@ can be either `copy`, `cursor`, or `auto`. The default is `auto`.
Specifies the path used to search user certificates and keys when
connecting to data nodes using certificate authentication. Defaults to
-`timescaledb/certs` under the PostgreSQL data directory.
+`timescaledb/certs` under the $PG data directory.
### `timescaledb.passfile (string)`
Specifies the name of the file where passwords are stored, used when
connecting to data nodes using password authentication.
-## Administration
-
-### `timescaledb.restoring (bool)`
-
-Set TimescaleDB in restoring mode. It is disabled by default.
-
-### `timescaledb.license (string)`
-
-Change access to features based on the TimescaleDB license in use. For example,
-setting `timescaledb.license` to `apache` limits TimescaleDB to features that
-are implemented under the Apache 2 license. The default value is `timescale`,
-which allows access to all features.
-
-### `timescaledb.telemetry_level (enum)`
-
-Telemetry settings level. Level used to determine which telemetry to
-send. Can be set to `off` or `basic`. Defaults to `basic`.
-
-### `timescaledb.last_tuned (string)`
-
-Records last time `timescaledb-tune` ran.
-
-### `timescaledb.last_tuned_version (string)`
-
-Version of `timescaledb-tune` used to tune when it runs.
-
-[continuous-aggregates]: /use-timescale/:currentVersion:/continuous-aggregates/