From adc0aca93d5f0b72ae6a1647af11fed50e874cb2 Mon Sep 17 00:00:00 2001
From: Iain
Date: Fri, 20 Jun 2025 14:39:30 +0200
Subject: [PATCH 1/2] chore: update chunk info information.

---
 _partials/_chunk-interval.md                  | 18 ++++++++++++++++++
 .../hypertables/improve-query-performance.md  |  7 ++-----
 use-timescale/hypertables/index.md            | 11 +++++------
 3 files changed, 25 insertions(+), 11 deletions(-)
 create mode 100644 _partials/_chunk-interval.md

diff --git a/_partials/_chunk-interval.md b/_partials/_chunk-interval.md
new file mode 100644
index 0000000000..1e6d26c1f1
--- /dev/null
+++ b/_partials/_chunk-interval.md
@@ -0,0 +1,18 @@
+Postgres builds the index on the fly during ingestion. That means that to build a new entry on the index,
+a significant portion of the index needs to be traversed during every row insertion. When the index does not fit
+into memory, it is constantly flushed to disk and read back, which wastes IO resources which would otherwise
+been used for writing the heap/WAL data to disk.
+
+The default chunk interval is 7 days. However, best practice is to set `chunk_interval` so that prior to processing,
+the indexes for chunks currently being ingested into fit within 25% of main memory. For example, on a system with 64
+GB of memory, if index growth is approximately 2 GB per day, a 1-week chunk interval is appropriate. If index growth is
+around 10 GB per day, use a 1-day interval.
+
+You set `chunk_interval` when you [create a $HYPERTABLE][hypertable-create-table], or by calling
+[`set_chunk_time_interval`][chunk_interval] on an existing hypertable.
+
+
+
+[best-practices]: /use-timescale/:currentVersion:/hypertables/#best-practices-for-time-partitioning
+[chunk_interval]: /api/:currentVersion:/hypertable/set_chunk_time_interval/
+[hypertable-create-table]: /api/:currentVersion:/hypertable/create_table/
diff --git a/use-timescale/hypertables/improve-query-performance.md b/use-timescale/hypertables/improve-query-performance.md
index 0bb0e101ca..e85aaaa714 100644
--- a/use-timescale/hypertables/improve-query-performance.md
+++ b/use-timescale/hypertables/improve-query-performance.md
@@ -6,6 +6,7 @@ keywords: [hypertables, indexes, chunks]
 ---

 import OldCreateHypertable from "versionContent/_partials/_old-api-create-hypertable.mdx";
+import ChunkInterval from "versionContent/_partials/_chunk-interval.mdx";

 # Improve hypertable and query performance

@@ -27,11 +28,7 @@ Adjusting your hypertable chunk interval can improve performance in your databas

 1. **Choose an optimum chunk interval**

-   The default chunk interval is 7 days. You can set a custom interval when you create a hypertable.
-   Best practice is that prior to processing, one chunk of data takes up 25% of main memory, including the indexes
-   from each active hypertable. For example, if you write approximately 2 GB of data per day to a database with 64
-   GB of memory, set `chunk_interval` to 1 week. If you write approximately 10 GB of data per day on the same
-   machine, set the time interval to 1 day. For more information, see [best practices for time partitioning][best-practices].
+   <ChunkInterval />

    In the following example you create a table called `conditions` that stores time values in the `time` column
    and has chunks that store data for a `chunk_interval` of one day:
diff --git a/use-timescale/hypertables/index.md b/use-timescale/hypertables/index.md
index cc9381f962..da7a37e397 100644
--- a/use-timescale/hypertables/index.md
+++ b/use-timescale/hypertables/index.md
@@ -6,6 +6,7 @@ keywords: [hypertables]
 ---

 import HypertableIntro from 'versionContent/_partials/_hypertable-intro.mdx';
+import ChunkInterval from "versionContent/_partials/_chunk-interval.mdx";

 # Hypertables

@@ -63,11 +64,9 @@ to fit into memory so you can insert and query recent data without reading
 from disk. However, having too many small and sparsely filled chunks can
 affect query planning time and compression.

-Best practice is to set `chunk_interval` so that prior to processing, one chunk of data
-takes up 25% of main memory, including the indexes from each active $HYPERTABLE.
-For example, if you write approximately 2 GB of data per day to a database with 64 GB of
-memory, set `chunk_interval` to 1 week. If you write approximately 10 GB of data per day
-on the same machine, set the time interval to 1 day.
+<ChunkInterval />
+
+

 For a detailed analysis of how to optimize your chunk sizes, see the
 [blog post on chunk time intervals][blog-chunk-time]. To learn how
@@ -108,4 +107,4 @@ This section shows you:

 [pg-analyze]: https://www.postgresql.org/docs/current/sql-analyze.html
 [chunks_detailed_size]: /api/:currentVersion:/hypertable/chunks_detailed_size
-[troubleshooting]: /use-timescale/:currentVersion:/hypertables/troubleshooting/
\ No newline at end of file
+[troubleshooting]: /use-timescale/:currentVersion:/hypertables/troubleshooting/

From 3c3bac48638f243ba7050af7a3dab11bc423440b Mon Sep 17 00:00:00 2001
From: Iain Cox
Date: Thu, 26 Jun 2025 14:00:51 +0200
Subject: [PATCH 2/2] Apply suggestions from code review

Co-authored-by: Anastasiia Tovpeko <114177030+atovpeko@users.noreply.github.com>
Signed-off-by: Iain Cox
---
 _partials/_chunk-interval.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/_partials/_chunk-interval.md b/_partials/_chunk-interval.md
index 1e6d26c1f1..4f84258bfc 100644
--- a/_partials/_chunk-interval.md
+++ b/_partials/_chunk-interval.md
@@ -1,7 +1,7 @@
-Postgres builds the index on the fly during ingestion. That means that to build a new entry on the index,
+$PG builds the index on the fly during ingestion. That means that to build a new entry on the index,
 a significant portion of the index needs to be traversed during every row insertion. When the index does not fit
-into memory, it is constantly flushed to disk and read back, which wastes IO resources which would otherwise
-been used for writing the heap/WAL data to disk.
+into memory, it is constantly flushed to disk and read back. This wastes IO resources which would otherwise
+be used for writing the heap/WAL data to disk.

 The default chunk interval is 7 days. However, best practice is to set `chunk_interval` so that prior to processing,
 the indexes for chunks currently being ingested into fit within 25% of main memory. For example, on a system with 64
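The partial added in this patch tells readers to set `chunk_interval` when creating a hypertable or to change it later with `set_chunk_time_interval`, but the patch itself does not include the commands. A minimal sketch of both approaches, using the `conditions` table and `time` column already referenced in these docs and the older `create_hypertable` API (the exact syntax for the newer `create_table` path is on the linked API page, not shown here); the extra columns are hypothetical:

```sql
-- Assumes a plain table such as this already exists (columns other than time are illustrative):
-- CREATE TABLE conditions (time TIMESTAMPTZ NOT NULL, device_id TEXT, temperature DOUBLE PRECISION);

-- Convert it to a hypertable partitioned on the time column, with 1-day chunks.
SELECT create_hypertable('conditions', 'time', chunk_time_interval => INTERVAL '1 day');

-- Change the interval on an existing hypertable. Only chunks created after this
-- call use the new interval; existing chunks keep their original boundaries.
SELECT set_chunk_time_interval('conditions', INTERVAL '24 hours');
```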