diff --git a/dev-tools/omdb/src/bin/omdb/reconfigurator.rs b/dev-tools/omdb/src/bin/omdb/reconfigurator.rs index c56679f493c..e074fc25b8f 100644 --- a/dev-tools/omdb/src/bin/omdb/reconfigurator.rs +++ b/dev-tools/omdb/src/bin/omdb/reconfigurator.rs @@ -148,7 +148,7 @@ async fn cmd_reconfigurator_export( // See Nexus::blueprint_planning_context(). eprint!("assembling reconfigurator state ... "); let state = nexus_reconfigurator_preparation::reconfigurator_state_load( - opctx, datastore, + opctx, datastore, None, ) .await?; eprintln!("done"); diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index 7e20b803e86..81134d4e97d 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -1517,6 +1517,7 @@ parent: internal DNS version::: 1 external DNS version::: 2 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1640,6 +1641,7 @@ parent: internal DNS version::: 1 external DNS version::: 2 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1665,6 +1667,7 @@ to: blueprint ............. 
internal DNS version::: 1 (unchanged) external DNS version::: 2 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout index 9591408f905..9ed40ac1396 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout @@ -274,6 +274,7 @@ parent: dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout index 01284a774e8..32ea69f0743 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout @@ -399,6 +399,7 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -518,6 +519,7 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -551,7 +553,7 @@ T ENA ID PARENT > blueprint-plan ade5749d-bdf3-4fab-a8ae-00bea01b3a5a INFO skipping noop image source check for all sleds, reason: no target release is currently set WARN cannot issue more MGS-driven updates (no current artifacts) -INFO some zones not yet up-to-date, sled_id: 89d02b1b-478c-401a-8e28-7a26f74fa41b, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: b3c9c041-d2f0-4767-bdaf-0e52e9d7a013 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: InstallDataset 
} }] +INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: b3c9c041-d2f0-4767-bdaf-0e52e9d7a013 (service), zone_kind: InternalNtp, reason: MissingInInventory { bp_image_source: InstallDataset } }] generated blueprint 86db3308-f817-4626-8838-4085949a6a41 based on parent blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a planning report for blueprint 86db3308-f817-4626-8838-4085949a6a41: chicken switches: @@ -633,6 +635,7 @@ to: blueprint 86db3308-f817-4626-8838-4085949a6a41 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -713,6 +716,7 @@ to: blueprint 86db3308-f817-4626-8838-4085949a6a41 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -793,6 +797,7 @@ to: blueprint 02697f74-b14a-4418-90f0-c28b2a3a6aa9 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1032,6 +1037,7 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1670,6 +1676,7 @@ to: blueprint 86db3308-f817-4626-8838-4085949a6a41 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout index fea4459e751..3d8db0389d3 100644 --- 
a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout @@ -331,6 +331,7 @@ parent: 06c88262-f435-410e-ba98-101bed41ec27 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -463,6 +464,7 @@ to: blueprint 366b0b68-d80e-4bc1-abd3-dc69837847e0 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1022,6 +1024,7 @@ parent: 3f00b694-1b16-4aaa-8f78-e6b3a527b434 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1039,7 +1042,7 @@ chicken switches: add zones with mupdate override: false * discretionary zones placed: - * 1 zone on sled 711ac7f8-d19e-4572-bdb9-e9b50f6e362a: external_dns + * external_dns zone on sled 711ac7f8-d19e-4572-bdb9-e9b50f6e362a from source install dataset * zone updates waiting on discretionary zones @@ -1163,6 +1166,7 @@ to: blueprint 9c998c1d-1a7b-440a-ae0c-40f781dea6e2 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1725,6 +1729,7 @@ parent: 366b0b68-d80e-4bc1-abd3-dc69837847e0 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1733,7 +1738,7 @@ chicken switches: add zones with mupdate override: false * discretionary zones placed: - * 1 zone on sled 711ac7f8-d19e-4572-bdb9-e9b50f6e362a: external_dns + * external_dns zone on sled 711ac7f8-d19e-4572-bdb9-e9b50f6e362a from source install dataset * zone updates waiting on discretionary zones @@ -1859,6 +1864,7 @@ to: 
blueprint 2ac8c740-444d-42ff-8d66-9812a7e51288 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout index 8f759dc2f04..26df8f664c8 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout @@ -329,6 +329,7 @@ parent: 184f10b3-61cb-41ef-9b93-3489b2bac559 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -461,6 +462,7 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -820,6 +822,7 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1053,7 +1056,7 @@ chicken switches: add zones with mupdate override: false * discretionary zones placed: - * 1 zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: internal_dns + * internal_dns zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c from source install dataset * zone updates waiting on discretionary zones @@ -1177,6 +1180,7 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff 
--git a/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout index 174b44d1c93..4caba4fd7e6 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-host-phase-2-source-stdout @@ -150,6 +150,7 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -430,6 +431,7 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -709,6 +711,7 @@ parent: 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -860,6 +863,7 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1140,6 +1144,7 @@ to: blueprint df06bb57-ad42-4431-9206-abff322896c7 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1419,6 +1424,7 @@ parent: af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout 
b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout index 407c97e9ba5..c7e01d118ed 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -508,6 +508,7 @@ chicken switches: * waiting on MUPdate overrides * MUPdate overrides exist * zone updates waiting on MUPdate overrides +* waiting to update top-level nexus_generation: pending non-nexus zone updates @@ -684,6 +685,7 @@ to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) * target release min gen: 1 -> 3 + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -883,6 +885,7 @@ chicken switches: * waiting on MUPdate overrides * MUPdate overrides exist * zone updates waiting on MUPdate overrides +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -897,6 +900,7 @@ to: blueprint 626487fa-7139-45ec-8416-902271fc730b internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 3 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1107,6 +1111,7 @@ chicken switches: * waiting on MUPdate overrides * MUPdate overrides exist * zone updates waiting on MUPdate overrides +* waiting to update top-level nexus_generation: pending non-nexus zone updates @@ -1231,6 +1236,7 @@ to: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) * target release min gen: 3 -> 4 + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1396,6 +1402,7 @@ chicken switches: * waiting on MUPdate overrides * MUPdate overrides exist * zone updates waiting on MUPdate overrides +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-show 
latest @@ -1560,6 +1567,7 @@ parent: c1a0d242-9160-40f4-96ae-61f8f40a0b1b internal DNS version::: 1 external DNS version::: 1 target release min gen: 4 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1571,6 +1579,7 @@ chicken switches: * waiting on MUPdate overrides * MUPdate overrides exist * zone updates waiting on MUPdate overrides +* waiting to update top-level nexus_generation: pending non-nexus zone updates @@ -1635,6 +1644,7 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 4 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1814,6 +1824,7 @@ chicken switches: * waiting on MUPdate overrides * MUPdate overrides exist * zone updates waiting on MUPdate overrides +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-show latest @@ -1978,6 +1989,7 @@ parent: afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS version::: 1 external DNS version::: 1 target release min gen: 4 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -1990,6 +2002,7 @@ chicken switches: * waiting on MUPdate overrides * MUPdate overrides exist * zone updates waiting on MUPdate overrides +* waiting to update top-level nexus_generation: pending non-nexus zone updates @@ -2059,6 +2072,7 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 4 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2211,7 +2225,9 @@ chicken switches: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts * 1 pending MGS update: * model0:serial0: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: 
NoValidVersion }) +* only placed 0/2 desired nexus zones * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-show latest @@ -2376,6 +2392,7 @@ parent: ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS version::: 1 external DNS version::: 1 target release min gen: 4 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 1 Pending MGS-managed updates (all baseboards): @@ -2393,7 +2410,9 @@ chicken switches: * skipping noop zone image source check on sled d81c6a84-79b8-4958-ae41-ea46c9b19763: all 6 zones are already from artifacts * 1 pending MGS update: * model0:serial0: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) +* only placed 0/2 desired nexus zones * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates @@ -2409,6 +2428,7 @@ to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 4 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2613,7 +2633,7 @@ INFO skipping board for MGS-driven update, serial_number: serial0, part_number: INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO ran out of boards for MGS-driven update -INFO some zones not yet up-to-date, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: 
ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 427ec88f-f467-42fa-9bbb-66a91a36103c (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 5199c033-4cf9-4ab6-8ae7-566bd7606363 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 6444f8a5-6465-4f0b-a549-1993c113569c (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 803bfb63-c246-41db-b0da-d3b87ddfc63d (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: ba4994a8-23f9-4b1a-a84f-a08d74591389 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02") }, inv_image_source: InstallDataset } }] +INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: 
0c71b3b2-6ceb-4e8f-b020-b08675e83038 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 427ec88f-f467-42fa-9bbb-66a91a36103c (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 5199c033-4cf9-4ab6-8ae7-566bd7606363 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 6444f8a5-6465-4f0b-a549-1993c113569c (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 803bfb63-c246-41db-b0da-d3b87ddfc63d (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: ba4994a8-23f9-4b1a-a84f-a08d74591389 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: 
ArtifactHash("21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (service), zone_kind: Nexus, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: 75b220ba-a0f4-4872-8202-dc7c87f062d0 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: ea5b4030-b52f-44b2-8d70-45f15f987d01 (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f55647d4-5500-4ad3-893a-df45bd50d622 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e") }, inv_image_source: InstallDataset } }, ZonePropagationIncomplete { zone_id: f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (service), zone_kind: ExternalDns, reason: 
ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("2.0.0") }, hash: ArtifactHash("f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f") }, inv_image_source: InstallDataset } }] generated blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 based on parent blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 planning report for blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300: chicken switches: @@ -2624,6 +2644,8 @@ chicken switches: * noop converting host phase 2 slot B to Artifact on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c * noop converting host phase 2 slot B to Artifact on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 * noop converting host phase 2 slot B to Artifact on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 +* only placed 0/2 desired nexus zones +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -2784,6 +2806,7 @@ to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 4 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2965,6 +2988,7 @@ chicken switches: * waiting on MUPdate overrides * MUPdate overrides exist * zone updates waiting on MUPdate overrides +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -3047,6 +3071,7 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) * target release min gen: 4 -> 5 + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3195,7 +3220,9 @@ planning report for blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c: * adding zones despite MUPdate override, as specified by the `add_zones_with_mupdate_override` chicken switch * discretionary zone placement waiting for NTP zones on sleds: c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b * 
missing NTP zone on sled c3bc4c6d-fdde-4fc4-8493-89d2a1e5ee6b +* only placed 0/2 desired nexus zones * zone updates waiting on MUPdate overrides +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -3272,6 +3299,7 @@ to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 5 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout index e9b59689616..2ca831b009c 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout @@ -186,6 +186,7 @@ chicken switches: * waiting on MUPdate overrides * MUPdate overrides exist * zone updates waiting on MUPdate overrides +* waiting to update top-level nexus_generation: pending non-nexus zone updates @@ -351,6 +352,7 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -552,6 +554,7 @@ chicken switches: * waiting on MUPdate overrides * MUPdate overrides exist * zone updates waiting on MUPdate overrides +* waiting to update top-level nexus_generation: pending non-nexus zone updates @@ -608,6 +611,7 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout 
b/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout index 451abc426ae..7dbb2ddddc3 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-mgs-updates-stdout @@ -205,6 +205,7 @@ parent: 6ccc786b-17f1-4562-958f-5a7d9a5a15fd internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -416,6 +417,7 @@ parent: ad97e762-7bf1-45a6-a98f-60afb7e491c0 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 1 Pending MGS-managed updates (all baseboards): @@ -441,6 +443,7 @@ to: blueprint cca24b71-09b5-4042-9185-b33e9f2ebba0 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -606,6 +609,7 @@ to: blueprint ad97e762-7bf1-45a6-a98f-60afb7e491c0 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -963,6 +967,7 @@ parent: cca24b71-09b5-4042-9185-b33e9f2ebba0 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 1 Pending MGS-managed updates (all baseboards): @@ -988,6 +993,7 @@ to: blueprint 5bf974f3-81f9-455b-b24e-3099f765664c internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1154,6 +1160,7 @@ to: blueprint cca24b71-09b5-4042-9185-b33e9f2ebba0 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) 
OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1513,6 +1520,7 @@ parent: 5bf974f3-81f9-455b-b24e-3099f765664c internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 2 Pending MGS-managed updates (all baseboards): @@ -1539,6 +1547,7 @@ to: blueprint 1b837a27-3be1-4fcb-8499-a921c839e1d0 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1895,6 +1904,7 @@ parent: 1b837a27-3be1-4fcb-8499-a921c839e1d0 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 1 Pending MGS-managed updates (all baseboards): @@ -1920,6 +1930,7 @@ to: blueprint 3682a71b-c6ca-4b7e-8f84-16df80c85960 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout index 01fb9438c11..5ee8dfee717 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout @@ -274,6 +274,7 @@ parent: df06bb57-ad42-4431-9206-abff322896c7 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -397,6 +398,7 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -665,6 +667,7 @@ parent: 
afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS version::: 1 external DNS version::: 1 target release min gen: 2 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -684,6 +687,7 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) * target release min gen: 1 -> 2 + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout index f704a5c0af3..37b9c7d3c32 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-zone-images-stdout @@ -107,6 +107,7 @@ parent: 1b013011-2062-4b48-b544-a32b23bce83a internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -225,6 +226,7 @@ parent: 9766ca20-38d4-4380-b005-e7c43c797e7c internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -329,6 +331,7 @@ to: blueprint f714e6ea-e85a-4d7d-93c2-a018744fe176 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -547,6 +550,7 @@ parent: bb128f06-a2e1-44c1-8874-4f789d0ff896 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -651,6 +655,7 @@ to: blueprint d9c572a1-a68c-4945-b1ec-5389bd588fe9 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git 
a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index 7ba66f41052..085b9618c10 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -221,13 +221,203 @@ chicken switches: * 1 pending MGS update: * model0:serial0: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) -* zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* discretionary zones placed: + * nexus zone on sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c from source artifact: version 1.0.0 + * nexus zone on sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 from source artifact: version 1.0.0 + * nexus zone on sled d81c6a84-79b8-4958-ae41-ea46c9b19763 from source artifact: version 1.0.0 +* zone updates waiting on discretionary zones +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest from: blueprint dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 + MODIFIED SLEDS: + + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 2 -> 3): + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service + + + datasets: + 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 
5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 in service none none off + oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 + oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 ++ oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_a67ad53f-d551-40e7-abae-57664779b27b 9edcc144-9dd9-4bf9-a26d-26f265400b0b in service none none off + + + omicron zones: + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 install dataset in service 
fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 ++ nexus a67ad53f-d551-40e7-abae-57664779b27b artifact: version 1.0.0 in service fd00:1122:3344:102::29 + + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 2 -> 3): + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-073979dd-3248-44a5-9fa1-cc72a140d682 in service + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + fake-vendor fake-model serial-e4d937e1-6ddc-4eca-bb08-c1f73791e608 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crucible 7b4ce6bf-95bb-42fe-a4a0-dff31211ab88 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible ea8a11bf-a884-4c4f-8df0-3ef9b7aacf43 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crucible 50b029e3-96aa-41e5-bf39-023193a4355e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/external_dns 4847a96e-a267-4ae7-aa3d-805c1e77f81e in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/internal_dns ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/zone/oxz_crucible_058fd5f9-60a8-4e11-9302-15172782e17d 02c56a30-7d97-406d-bd34-1eb437fd517d in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 832fd140-d467-4bad-b5e9-63171634087c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_dfac80b4-a887-430a-ae87-a4e065dba787 4d7e3e8e-06bd-414c-a468-779e056a9b75 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 42430c80-7836-4191-a4f6-bcee749010fe in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + 
oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 + oxp_e4d937e1-6ddc-4eca-bb08-c1f73791e608/crypt/debug 686c19cf-a0d7-45f6-866f-c564612b2664 in service 100 GiB none gzip-9 ++ oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_43cbc3a6-e640-43f5-a9a2-f83eff427870 7f7a2971-de10-4d5a-a814-901adc52bb00 in service none none off + + + omicron zones: + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d install dataset in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c 
install dataset in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 ++ nexus 43cbc3a6-e640-43f5-a9a2-f83eff427870 artifact: version 1.0.0 in service fd00:1122:3344:101::28 + + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 2 -> 3): + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-18b20749-0748-4105-bb10-7b13cfc776e2 in service + fake-vendor fake-model serial-30c16fe4-4229-49d0-ab01-3138f2c7dff2 in service + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crucible 7ea73f80-c4e0-450a-92dc-8397ce2af14f in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crucible 6f04dd20-5e2c-4fa8-8430-a886470ed140 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible a50cd13a-5749-4e79-bb8b-19229500a8b3 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/external_dns 96ae8389-3027-4260-9374-e0f6ce851de2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/internal_dns 
1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_crucible_694bd14f-cb24-4be4-bb19-876e79cda2c8 3443a368-199e-4d26-b59f-3f2bbd507761 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_7c252b64-c5af-4ec1-989e-9a03f3b0f111 429da94b-19f7-48bd-98e9-47842863ba7b in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 50ea8c15-c4c0-4403-a490-d14b3405dfc2 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/debug 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service 100 GiB none gzip-9 + 
oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 21fd4f3a-ec31-469b-87b1-087c343a2422 in service 100 GiB none gzip-9 ++ oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_nexus_26fbf986-e560-4449-a351-547d1721b90e e0b86bc5-6a64-432b-bcbe-482e228a4e7d in service none none off + + + omicron zones: + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 install dataset in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 install dataset in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 ++ nexus 26fbf986-e560-4449-a351-547d1721b90e artifact: version 1.0.0 in service fd00:1122:3344:103::28 + + COCKROACHDB SETTINGS: state fingerprint::::::::::::::::: (none) (unchanged) cluster.preserve_downgrade_option: (do not modify) (unchanged) @@ -236,6 +426,7 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -251,11 +442,13 @@ to: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 
internal DNS: - DNS zone: "control-plane.oxide.internal" (unchanged) +* DNS zone: "control-plane.oxide.internal": name: 058fd5f9-60a8-4e11-9302-15172782e17d.host (records: 1) AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 ++ name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) ++ AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -264,6 +457,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 ++ name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) ++ AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -332,16 +527,24 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) - SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal - SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal - SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal +* name: _nexus._tcp (records: 3 -> 6) +- SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal +- SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal +- SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal ++ SRV port 12221 
26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal ++ SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal ++ SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal ++ SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal ++ SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal ++ name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) ++ AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -370,15 +573,21 @@ internal DNS: AAAA fd00:1122:3344:3::1 external DNS: - DNS zone: "oxide.example" (unchanged) +* DNS zone: "oxide.example": name: @ (records: 3) NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) - A 192.0.2.2 - A 192.0.2.3 - A 192.0.2.4 +* name: example-silo.sys (records: 3 -> 6) +- A 192.0.2.2 +- A 192.0.2.3 +- A 192.0.2.4 ++ A 192.0.2.2 ++ A 192.0.2.7 ++ A 192.0.2.3 ++ A 192.0.2.6 ++ A 192.0.2.5 ++ A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 name: ns2 (records: 1) @@ -391,13 +600,13 @@ external DNS: > # If we generate another plan, there should be no change. 
> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO 
BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update not yet completed (will keep it), artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 @@ -410,6 +619,7 @@ chicken switches: * 1 pending MGS update: * model0:serial0: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -424,6 +634,7 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -436,6 +647,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -444,6 +657,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -512,16 +727,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 
a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -555,9 +775,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -580,13 +803,13 @@ set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 RoT bootloader versions: stage0 -> generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 
0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 0, sp_type: Sled, serial_number: 
serial0, part_number: model0 @@ -600,6 +823,7 @@ chicken switches: * 1 pending MGS update: * model0:serial0: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -614,6 +838,7 @@ to: blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -635,6 +860,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -643,6 +870,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -711,16 +940,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 
6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -754,9 +988,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -779,13 +1016,13 @@ set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 RoT settings: slot a -> 1.0.0 generated inventory collection 61f451b3-2121-4ed6-91c7-a550054f6c21 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, 
num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 
d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 @@ -799,6 +1036,7 @@ chicken switches: * 1 pending MGS update: * model0:serial0: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -813,6 +1051,7 @@ to: blueprint df06bb57-ad42-4431-9206-abff322896c7 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -834,6 +1073,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -842,6 +1083,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 
466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -910,16 +1153,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -953,9 +1201,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -979,13 +1230,13 
@@ set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 SP versions: active -> 1.0.0 generated inventory collection b1bda47d-2c19-4fba-96e3-d9df28db7436 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed 
noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 @@ -999,6 +1250,7 @@ chicken switches: * 1 pending MGS update: * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -1007,7 +1259,7 @@ to: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 2 -> 3): + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 3 -> 4): host phase 2 contents: -------------------------------- @@ -1046,6 +1298,7 @@ to: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_43cbc3a6-e640-43f5-a9a2-f83eff427870 7f7a2971-de10-4d5a-a814-901adc52bb00 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off oxp_073979dd-3248-44a5-9fa1-cc72a140d682/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service 100 GiB none gzip-9 @@ -1053,17 +1306,18 @@ to: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba omicron zones: - --------------------------------------------------------------------------------------------------------------- - zone type zone id image source disposition underlay IP - --------------------------------------------------------------------------------------------------------------- - crucible 058fd5f9-60a8-4e11-9302-15172782e17d install dataset in service 
fd00:1122:3344:101::27 - crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 - crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 - crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 - external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 - internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset in service fd00:1122:3344:2::1 - internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 - nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible 058fd5f9-60a8-4e11-9302-15172782e17d install dataset in service fd00:1122:3344:101::27 + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 + crucible dfac80b4-a887-430a-ae87-a4e065dba787 install dataset in service fd00:1122:3344:101::26 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 install dataset in service fd00:1122:3344:101::22 + nexus 43cbc3a6-e640-43f5-a9a2-f83eff427870 artifact: version 1.0.0 in service fd00:1122:3344:101::28 COCKROACHDB SETTINGS: @@ -1074,6 +1328,7 @@ to: blueprint 
7f976e0d-d2a5-4eeb-9e82-c82bc2824aba internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1095,6 +1350,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -1103,6 +1360,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -1171,16 +1430,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 
353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -1214,9 +1478,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -1230,12 +1497,12 @@ external DNS: > # If we generate another plan, there should be no change. > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, 
num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update not yet completed (will keep it), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 @@ -1248,6 +1515,7 @@ 
chicken switches: * 1 pending MGS update: * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -1262,6 +1530,7 @@ to: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1274,6 +1543,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -1282,6 +1553,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -1350,16 +1623,21 @@ internal DNS: SRV port 5353 
427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -1393,9 +1671,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -1419,12 +1700,12 @@ generated inventory collection a71f7a73-35a6-45e8-acbe-f1c5925eed69 from configu > # Planning after only phase 2 has changed should make no changes. 
We're still > # waiting on phase 1 to change. > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 
0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update not yet completed (will keep it), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 @@ -1437,6 +1718,7 @@ chicken switches: * 1 pending MGS update: * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -1451,6 +1733,7 @@ to: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1463,6 +1746,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -1471,6 +1756,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -1539,16 +1826,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 
a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -1582,9 +1874,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -1607,12 +1902,12 @@ generated inventory collection 0b5efbb3-0b1b-4bbf-b7d8-a2d6fca074c6 from configu > # Planning _still_ shouldn't make any new changes; the OS update as a whole > # isn't done until sled-agent reports it has booted from the new image. 
> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO 
BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO keeping apparently-impossible MGS-driven update (waiting for recent update to be applied), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 @@ -1625,6 +1920,7 @@ chicken switches: * 1 pending MGS update: * model0:serial0: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:101::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -1639,6 +1935,7 @@ to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1651,6 +1948,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -1659,6 +1958,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -1727,16 +2028,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 
a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -1770,9 +2076,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -1795,12 +2104,12 @@ generated inventory collection 78f72e8d-46a9-40a9-8618-602f54454d80 from configu > # Planning should now remove the host OS update and plan the next RoT bootloader > # update. 
> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO 
BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:101::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 @@ -1815,6 +2124,7 @@ chicken switches: * 1 pending MGS update: * model1:serial1: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -1829,6 +2139,7 @@ to: blueprint 626487fa-7139-45ec-8416-902271fc730b internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -1850,6 +2161,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -1858,6 +2171,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -1926,16 +2241,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 
a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -1969,9 +2289,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -1997,12 +2320,12 @@ set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT bootloader versions: stage0_ne generated inventory collection 39363465-89ae-4ac2-9be1-099068da9d45 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 
0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 @@ -2016,6 +2339,7 @@ chicken switches: * 1 pending MGS update: * model1:serial1: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), 
expected_stage0_next_version: Version(ArtifactVersion("0.5.0")) }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -2030,6 +2354,7 @@ to: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2051,6 +2376,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -2059,6 +2386,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -2127,16 +2456,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 
43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -2170,9 +2504,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -2193,12 +2530,12 @@ set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT bootloader versions: stage0 -> generated inventory collection 04bc9001-0836-4fec-b9cb-9d4760caf8b4 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 
0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: Version(ArtifactVersion("0.5.0")), expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 1, sp_type: 
Sled, serial_number: serial1, part_number: model1 @@ -2212,6 +2549,7 @@ chicken switches: * 1 pending MGS update: * model1:serial1: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -2226,6 +2564,7 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2247,6 +2586,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -2255,6 +2596,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -2323,16 +2666,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) 
+ name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -2366,9 +2714,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -2393,12 +2744,12 @@ set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT settings: slot b -> 0.5.0 generated inventory collection 08abe624-4b5f-491c-90cb-d74a84e4ba3e from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 
2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update impossible (will remove it 
and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 @@ -2412,6 +2763,7 @@ chicken switches: * 1 pending MGS update: * model1:serial1: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -2426,6 +2778,7 @@ to: blueprint ce365dff-2cdb-4f35-a186-b15e20e1e700 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2447,6 +2800,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -2455,6 +2810,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 
466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -2523,16 +2880,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -2566,9 +2928,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -2589,12 
+2954,12 @@ set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c RoT settings: slot a -> 1.0.0 generated inventory collection 005f6a30-7f65-4593-9f78-ee68f766f42b from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in 
TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: Version(ArtifactVersion("0.5.0")), component: rot, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 @@ -2608,6 +2973,7 @@ chicken switches: * 1 pending MGS update: * model1:serial1: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -2622,6 +2988,7 @@ to: blueprint 8f2d1f39-7c88-4701-aa43-56bf281b28c1 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2643,6 +3010,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -2651,6 +3020,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -2719,16 +3090,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 
a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -2762,9 +3138,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -2788,12 +3167,12 @@ set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c SP versions: inactive -> 0.5.0 generated inventory collection b5263998-e486-4cea-8842-b32bd326fa3a from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 
0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 @@ -2807,6 +3186,7 @@ chicken switches: * 1 pending MGS update: * model1:serial1: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: 
Version(ArtifactVersion("0.5.0")) }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -2821,6 +3201,7 @@ to: blueprint 12d602a6-5ab4-487a-b94e-eb30cdf30300 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -2842,6 +3223,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -2850,6 +3233,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -2918,16 +3303,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 
466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -2961,9 +3351,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -2985,12 +3378,12 @@ generated inventory collection 68767302-7fed-4eb1-9611-3dfd807ff0cd from configu > # Planning should remove this update and add an OS update for this sled. 
> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO 
BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: Version(ArtifactVersion("0.5.0")), expected_active_version: 0.0.1, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 @@ -3004,6 +3397,7 @@ chicken switches: * 1 pending MGS update: * model1:serial1: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -3012,7 +3406,7 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 MODIFIED SLEDS: - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 2 -> 3): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 3 -> 4): host phase 2 contents: -------------------------------- @@ -3053,6 +3447,7 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_a67ad53f-d551-40e7-abae-57664779b27b 9edcc144-9dd9-4bf9-a26d-26f265400b0b in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 in service none none off oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 @@ -3060,18 +3455,19 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 omicron zones: - --------------------------------------------------------------------------------------------------------------- - zone type zone id image source disposition underlay IP - --------------------------------------------------------------------------------------------------------------- - clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 install dataset in service 
fd00:1122:3344:102::23 - crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 install dataset in service fd00:1122:3344:102::23 + crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service 
fd00:1122:3344:102::22 + nexus a67ad53f-d551-40e7-abae-57664779b27b artifact: version 1.0.0 in service fd00:1122:3344:102::29 COCKROACHDB SETTINGS: @@ -3082,6 +3478,7 @@ to: blueprint 61a93ea3-c872-48e0-aace-e86b0c52b839 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3103,6 +3500,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -3111,6 +3510,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -3179,16 +3580,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 
466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -3222,9 +3628,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -3250,11 +3659,11 @@ set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c host phase 1 details: B -> fffffff generated inventory collection 62898097-2ff1-48d0-8bc1-91b475daa33d from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 
98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update impossible (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:102::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, 
component: host_phase_1, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 @@ -3268,6 +3677,7 @@ chicken switches: * 1 pending MGS update: * model1:serial1: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:102::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -3282,6 +3692,7 @@ to: blueprint 27e755bc-dc10-4647-853c-f89bb3a15a2c internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3303,6 +3714,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -3311,6 +3724,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 
5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -3379,16 +3794,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -3422,9 +3842,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -3450,11 +3873,11 @@ generated inventory collection 3086f142-62d3-4f77-bda3-674afbb42d0d from configu > # 
Another planning step should try to update the last sled, starting with the > # RoT bootloader. > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, 
slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:102::1]:12345, expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 @@ -3470,6 +3893,7 @@ chicken switches: * 1 pending MGS update: * model2:serial2: RotBootloader(PendingMgsUpdateRotBootloaderDetails { expected_stage0_version: ArtifactVersion("0.0.1"), expected_stage0_next_version: NoValidVersion }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) 
+* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -3484,6 +3908,7 @@ to: blueprint 9f89efdf-a23e-4137-b7cc-79f4a91cbe1f internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3505,6 +3930,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -3513,6 +3940,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -3581,16 +4010,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 
a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -3624,9 +4058,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -3647,11 +4084,11 @@ set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT bootloader versions: stage0 -> generated inventory collection ae5b3bb4-ce21-465f-b18e-857614732d66 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, 
num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 5b0f601b1fbb8674db9c751a02f8b14f8e6d4e8470f4f7b686fecb2c49ec11f9, expected_stage0_next_version: NoValidVersion, expected_stage0_version: 0.0.1, component: rot_bootloader, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 @@ -3665,6 +4102,7 @@ chicken switches: * 1 pending MGS update: * model2:serial2: Rot(PendingMgsUpdateRotDetails { expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, expected_persistent_boot_preference: A, expected_pending_persistent_boot_preference: None, expected_transient_boot_preference: None }) * zone updates waiting on pending 
MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -3679,6 +4117,7 @@ to: blueprint 9a9e6c32-5a84-4020-a159-33dceff18d35 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3700,6 +4139,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -3708,6 +4149,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -3776,16 +4219,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + 
SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -3819,9 +4267,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -3842,11 +4293,11 @@ set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 RoT settings: slot a -> 1.0.0 generated inventory collection 34c3258c-b2ab-4da9-9720-41a3a703c3d7 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, 
num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: d11e65f934bf0de51df2e5b484f61ee72072417b43ac87f33e958008428e7b02, expected_transient_boot_preference: None, expected_pending_persistent_boot_preference: None, expected_persistent_boot_preference: A, expected_active_slot: ExpectedActiveRotSlot { slot: A, version: ArtifactVersion("0.0.2") }, expected_inactive_version: NoValidVersion, component: rot, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 @@ -3860,6 +4311,7 @@ chicken switches: * 1 pending MGS update: * model2:serial2: Sp(PendingMgsUpdateSpDetails { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion }) * zone updates waiting on 
pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -3874,6 +4326,7 @@ to: blueprint 13cfdd24-52ba-4e94-8c83-02e3a48fc746 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -3895,6 +4348,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -3903,6 +4358,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -3971,16 +4428,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 
466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -4014,9 +4476,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -4037,11 +4502,11 @@ set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 SP versions: active -> 1.0.0 generated inventory collection 5e106b73-6a14-4955-b8a8-a4f8afed6405 from configured sleds > blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 
98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: b, expected_hash: 0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 68465b8e3f808f475510b525cfd62086d37ddd57688bd854184fdafb2b2198a4, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 @@ -4055,6 +4520,7 @@ chicken switches: * 1 pending MGS update: * model2:serial2: HostPhase1(PendingMgsUpdateHostPhase1Details { expected_active_phase_1_slot: A, expected_boot_disk: A, expected_active_phase_1_hash: ArtifactHash("0101010101010101010101010101010101010101010101010101010101010101"), expected_active_phase_2_hash: 
ArtifactHash("0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a"), expected_inactive_phase_1_hash: ArtifactHash("0202020202020202020202020202020202020202020202020202020202020202"), expected_inactive_phase_2_hash: ArtifactHash("f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008"), sled_agent_address: [fd00:1122:3344:103::1]:12345 }) * zone updates waiting on pending MGS updates (RoT / SP / Host OS / etc.) +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest @@ -4063,7 +4529,7 @@ to: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a MODIFIED SLEDS: - sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 2 -> 3): + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 3 -> 4): host phase 2 contents: -------------------------------- @@ -4101,6 +4567,7 @@ to: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 54bbadaf-ec04-41a2-a62f-f5ac5bf321be in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_30c16fe4-4229-49d0-ab01-3138f2c7dff2/crypt/zone/oxz_nexus_26fbf986-e560-4449-a351-547d1721b90e e0b86bc5-6a64-432b-bcbe-482e228a4e7d in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off oxp_18b20749-0748-4105-bb10-7b13cfc776e2/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb 
in service 100 GiB none gzip-9 @@ -4109,17 +4576,18 @@ to: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a omicron zones: - --------------------------------------------------------------------------------------------------------------- - zone type zone id image source disposition underlay IP - --------------------------------------------------------------------------------------------------------------- - crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 install dataset in service fd00:1122:3344:103::26 - crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 install dataset in service fd00:1122:3344:103::27 - crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 - crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 - external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 - internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 - internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 - nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible 694bd14f-cb24-4be4-bb19-876e79cda2c8 install dataset in service fd00:1122:3344:103::26 + crucible 7c252b64-c5af-4ec1-989e-9a03f3b0f111 install dataset in service fd00:1122:3344:103::27 + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service 
fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 26fbf986-e560-4449-a351-547d1721b90e artifact: version 1.0.0 in service fd00:1122:3344:103::28 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 COCKROACHDB SETTINGS: @@ -4130,6 +4598,7 @@ to: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -4151,6 +4620,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -4159,6 +4630,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -4227,16 +4700,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 
26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -4270,9 +4748,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 @@ -4297,95 +4778,30 @@ generated inventory collection 36ef425f-a672-4bf4-8d29-14815a84ccad from configu > # Do one more planning run. This should update one control plane zone. 
> blueprint-plan latest latest -INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop zone image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 10, num_already_artifact: 1, num_eligible: 0, num_ineligible: 9 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a -INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop zone image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 9, num_already_artifact: 1, num_eligible: 0, num_ineligible: 8 INFO BootPartitionDetails inventory hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, slot: a, expected_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a INFO MGS-driven update completed (will remove it and re-evaluate board), artifact_version: 1.0.0, artifact_hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, sled_agent_address: [fd00:1122:3344:103::1]:12345, 
expected_inactive_phase_2_hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, expected_inactive_phase_1_hash: 0202020202020202020202020202020202020202020202020202020202020202, expected_active_phase_2_hash: 0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a, expected_active_phase_1_hash: 0101010101010101010101010101010101010101010101010101010101010101, expected_boot_disk: A, expected_active_phase_1_slot: A, component: host_phase_1, sp_slot: 2, sp_type: Sled, serial_number: serial2, part_number: model2 INFO skipping board for MGS-driven update, serial_number: serial2, part_number: model2 INFO skipping board for MGS-driven update, serial_number: serial0, part_number: model0 INFO skipping board for MGS-driven update, serial_number: serial1, part_number: model1 INFO ran out of boards for MGS-driven update +INFO some zones not yet up-to-date, zones_currently_updating: [ZonePropagationIncomplete { zone_id: a67ad53f-d551-40e7-abae-57664779b27b (service), zone_kind: Nexus, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") } } }, ZonePropagationIncomplete { zone_id: 43cbc3a6-e640-43f5-a9a2-f83eff427870 (service), zone_kind: Nexus, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") } } }, ZonePropagationIncomplete { zone_id: 26fbf986-e560-4449-a351-547d1721b90e (service), zone_kind: Nexus, reason: MissingInInventory { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") } } }] generated blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 based on parent blueprint b82656b0-a9be-433d-83d0-e2bdf371777a planning report for blueprint 
31c84831-be52-4630-bc3f-128d72cd8f22: chicken switches: add zones with mupdate override: false -* 1 out-of-date zone updated in-place: - * sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, zone 353b3b65-20f7-48c3-88f7-495bd5d31545 (clickhouse) -* 25 remaining out-of-date zones +* waiting to update top-level nexus_generation: pending non-nexus zone updates > blueprint-diff latest from: blueprint b82656b0-a9be-433d-83d0-e2bdf371777a to: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 - MODIFIED SLEDS: - - sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 3 -> 4): - - host phase 2 contents: - ------------------------------ - slot boot image source - ------------------------------ - A current contents - B artifact: version 1.0.0 - - - physical disks: - ------------------------------------------------------------------------------------ - vendor model serial disposition - ------------------------------------------------------------------------------------ - fake-vendor fake-model serial-727522a7-934f-494d-b5b3-160968e74463 in service - fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service - fake-vendor fake-model serial-b5fd5bc1-099e-4e77-8028-a9793c11f43b in service - - - datasets: - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - dataset name dataset id disposition quota reservation compression - ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - oxp_727522a7-934f-494d-b5b3-160968e74463/crucible 2f204c50-a327-479c-8852-f53ec7a19c1f in service none none off - oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 78f34ce7-42f1-41da-995f-318f32054ad2 in service none none off - oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crucible 
1640adb6-70bf-44cf-b05c-bff6dd300cf3 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/clickhouse 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/external_dns 8e0bd2bd-23b7-4bc6-9e73-c4d4ebc0bc8c in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/internal_dns 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off - oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 775f9207-c42d-4af2-9186-27ffef67735e in service none none off - oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off - oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/zone/oxz_crucible_86a22a56-0168-453d-9df1-cb2a7c64b5d3 3e0d6188-c503-49cf-a441-fa7df40ceb43 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 5ae11c7e-08fa-4d78-a4ea-14b4a9a10241 in service none none off - oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_e2fdefe7-95b2-4fd2-ae37-56929a06d58c b8f2a09f-8bd2-4418-872b-a4457a3f958c in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 49f8fbb6-5bac-4609-907f-6e3dfc206059 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off - 
oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 09b9cc9b-3426-470b-a7bc-538f82dede03 in service none none off - oxp_727522a7-934f-494d-b5b3-160968e74463/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 - oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 2db6b7c1-0f46-4ced-a3ad-48872793360e in service 100 GiB none gzip-9 - oxp_b5fd5bc1-099e-4e77-8028-a9793c11f43b/crypt/debug 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service 100 GiB none gzip-9 - - - omicron zones: - ------------------------------------------------------------------------------------------------------------------------- - zone type zone id image source disposition underlay IP - ------------------------------------------------------------------------------------------------------------------------- - crucible 86a22a56-0168-453d-9df1-cb2a7c64b5d3 install dataset in service fd00:1122:3344:102::28 - crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 - crucible e2fdefe7-95b2-4fd2-ae37-56929a06d58c install dataset in service fd00:1122:3344:102::27 - crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 - external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 - internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 - internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 - nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 -* clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 - install dataset in service fd00:1122:3344:102::23 - └─ + artifact: version 1.0.0 - - COCKROACHDB SETTINGS: state 
fingerprint::::::::::::::::: (none) (unchanged) cluster.preserve_downgrade_option: (do not modify) (unchanged) @@ -4394,6 +4810,7 @@ to: blueprint 31c84831-be52-4630-bc3f-128d72cd8f22 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) @@ -4414,6 +4831,8 @@ internal DNS: AAAA fd00:1122:3344:101::27 name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) AAAA fd00:1122:3344:101::22 + name: 26fbf986-e560-4449-a351-547d1721b90e.host (records: 1) + AAAA fd00:1122:3344:103::28 name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) AAAA fd00:1122:3344:102::1 name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) @@ -4422,6 +4841,8 @@ internal DNS: AAAA fd00:1122:3344:103::22 name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) AAAA fd00:1122:3344:2::1 + name: 43cbc3a6-e640-43f5-a9a2-f83eff427870.host (records: 1) + AAAA fd00:1122:3344:101::28 name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) AAAA fd00:1122:3344:102::22 name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) @@ -4490,16 +4911,21 @@ internal DNS: SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal - name: _nexus._tcp (records: 3) + name: _nexus._tcp (records: 6) SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 26fbf986-e560-4449-a351-547d1721b90e.host.control-plane.oxide.internal SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 43cbc3a6-e640-43f5-a9a2-f83eff427870.host.control-plane.oxide.internal SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + SRV port 12221 
a67ad53f-d551-40e7-abae-57664779b27b.host.control-plane.oxide.internal name: _oximeter-reader._tcp (records: 1) SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal name: _repo-depot._tcp (records: 3) SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: a67ad53f-d551-40e7-abae-57664779b27b.host (records: 1) + AAAA fd00:1122:3344:102::29 name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) AAAA fd00:1122:3344:102::25 name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) @@ -4533,9 +4959,12 @@ external DNS: NS ns1.oxide.example NS ns2.oxide.example NS ns3.oxide.example - name: example-silo.sys (records: 3) + name: example-silo.sys (records: 6) A 192.0.2.2 + A 192.0.2.7 A 192.0.2.3 + A 192.0.2.6 + A 192.0.2.5 A 192.0.2.4 name: ns1 (records: 1) A 198.51.100.1 diff --git a/live-tests/tests/test_nexus_add_remove.rs b/live-tests/tests/test_nexus_add_remove.rs index 8116af7321d..4d61b0d57b1 100644 --- a/live-tests/tests/test_nexus_add_remove.rs +++ b/live-tests/tests/test_nexus_add_remove.rs @@ -53,10 +53,14 @@ async fn test_nexus_add_remove(lc: &LiveTestContext) { .map_or_else(PlannerChickenSwitches::default, |cs| { cs.switches.planner_switches }); - let planning_input = - PlanningInputFromDb::assemble(&opctx, &datastore, chicken_switches) - .await - .expect("planning input"); + let planning_input = PlanningInputFromDb::assemble( + &opctx, + &datastore, + chicken_switches, + None, + ) + .await + .expect("planning input"); let collection = datastore .inventory_get_latest_collection(opctx) .await @@ -271,10 +275,14 @@ async fn test_nexus_add_remove(lc: &LiveTestContext) { // Now run through the planner. 
info!(log, "running through planner"); - let planning_input = - PlanningInputFromDb::assemble(&opctx, &datastore, chicken_switches) - .await - .expect("planning input"); + let planning_input = PlanningInputFromDb::assemble( + &opctx, + &datastore, + chicken_switches, + None, + ) + .await + .expect("planning input"); let (_, parent_blueprint) = datastore .blueprint_target_get_current_full(opctx) .await diff --git a/nexus/db-model/src/deployment.rs b/nexus/db-model/src/deployment.rs index e3e7cd50ddf..5cd9ca9e500 100644 --- a/nexus/db-model/src/deployment.rs +++ b/nexus/db-model/src/deployment.rs @@ -81,6 +81,7 @@ pub struct Blueprint { pub creator: String, pub comment: String, pub target_release_minimum_generation: Generation, + pub nexus_generation: Generation, } impl From<&'_ nexus_types::deployment::Blueprint> for Blueprint { @@ -100,6 +101,7 @@ impl From<&'_ nexus_types::deployment::Blueprint> for Blueprint { target_release_minimum_generation: Generation( bp.target_release_minimum_generation, ), + nexus_generation: Generation(bp.nexus_generation), } } } @@ -113,6 +115,7 @@ impl From for nexus_types::deployment::BlueprintMetadata { external_dns_version: *value.external_dns_version, target_release_minimum_generation: *value .target_release_minimum_generation, + nexus_generation: *value.nexus_generation, cockroachdb_fingerprint: value.cockroachdb_fingerprint, cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::from_optional_string( @@ -524,6 +527,7 @@ pub struct BpOmicronZone { pub image_source: DbBpZoneImageSource, pub image_artifact_sha256: Option, + pub nexus_generation: Option, } impl BpOmicronZone { @@ -585,6 +589,7 @@ impl BpOmicronZone { snat_ip: None, snat_first_port: None, snat_last_port: None, + nexus_generation: None, }; match &blueprint_zone.zone_type { @@ -716,6 +721,7 @@ impl BpOmicronZone { nic, external_tls, external_dns_servers, + nexus_generation, }) => { // Set the common fields bp_omicron_zone @@ -733,6 +739,8 @@ impl 
BpOmicronZone { .map(IpNetwork::from) .collect(), ); + bp_omicron_zone.nexus_generation = + Some(Generation::from(*nexus_generation)); } BlueprintZoneType::Oximeter(blueprint_zone_type::Oximeter { address, @@ -938,6 +946,9 @@ impl BpOmicronZone { .into_iter() .map(|i| i.ip()) .collect(), + nexus_generation: *self.nexus_generation.ok_or_else( + || anyhow!("expected 'nexus_generation'"), + )?, }) } ZoneType::Oximeter => { diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index fd8fc89b3bb..cde3062083b 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock}; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: Version = Version::new(183, 0, 0); +pub const SCHEMA_VERSION: Version = Version::new(184, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. 
// v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(184, "nexus-generation"), KnownVersion::new(183, "add-ip-version-to-pools"), KnownVersion::new(182, "add-tuf-artifact-board"), KnownVersion::new(181, "rename-nat-table"), diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index 0e46092de95..6ce19a93ff7 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -548,6 +548,7 @@ impl DataStore { internal_dns_version, external_dns_version, target_release_minimum_generation, + nexus_generation, cockroachdb_fingerprint, cockroachdb_setting_preserve_downgrade, time_created, @@ -574,6 +575,7 @@ impl DataStore { *blueprint.internal_dns_version, *blueprint.external_dns_version, *blueprint.target_release_minimum_generation, + *blueprint.nexus_generation, blueprint.cockroachdb_fingerprint, blueprint.cockroachdb_setting_preserve_downgrade, blueprint.time_created, @@ -1325,6 +1327,7 @@ impl DataStore { internal_dns_version, external_dns_version, target_release_minimum_generation, + nexus_generation, cockroachdb_fingerprint, cockroachdb_setting_preserve_downgrade, clickhouse_cluster_config, @@ -4271,6 +4274,7 @@ mod tests { }, external_tls: false, external_dns_servers: vec![], + nexus_generation: Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, diff --git a/nexus/db-queries/src/db/datastore/deployment/external_networking.rs b/nexus/db-queries/src/db/datastore/deployment/external_networking.rs index e8cb951f85b..dd525fdbbc9 100644 --- a/nexus/db-queries/src/db/datastore/deployment/external_networking.rs +++ b/nexus/db-queries/src/db/datastore/deployment/external_networking.rs @@ -454,6 +454,7 @@ mod tests { use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; use omicron_common::address::NTP_OPTE_IPV4_SUBNET; use omicron_common::address::NUM_SOURCE_NAT_PORTS; + use 
omicron_common::api::external::Generation; use omicron_common::api::external::MacAddr; use omicron_common::api::external::Vni; use omicron_common::zpool_name::ZpoolName; @@ -643,6 +644,7 @@ mod tests { nic: self.nexus_nic.clone(), external_tls: false, external_dns_servers: Vec::new(), + nexus_generation: Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 70cf81c1bb6..e2c4f62ffb3 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -1091,6 +1091,7 @@ mod test { internal_dns_version: *Generation::new(), external_dns_version: *Generation::new(), target_release_minimum_generation: *Generation::new(), + nexus_generation: *Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: *Generation::new(), @@ -1515,6 +1516,7 @@ mod test { slot: 0, transit_ips: vec![], }, + nexus_generation: *Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, @@ -1585,6 +1587,7 @@ mod test { internal_dns_version: *Generation::new(), external_dns_version: *Generation::new(), target_release_minimum_generation: *Generation::new(), + nexus_generation: *Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: *Generation::new(), @@ -1773,6 +1776,7 @@ mod test { slot: 0, transit_ips: vec![], }, + nexus_generation: *Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, @@ -1806,6 +1810,7 @@ mod test { slot: 0, transit_ips: vec![], }, + nexus_generation: *Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, @@ -1850,6 +1855,7 @@ mod test { internal_dns_version: *Generation::new(), external_dns_version: *Generation::new(), target_release_minimum_generation: *Generation::new(), + nexus_generation: *Generation::new(), 
cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: *Generation::new(), @@ -2056,6 +2062,7 @@ mod test { slot: 0, transit_ips: vec![], }, + nexus_generation: *Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, @@ -2107,6 +2114,7 @@ mod test { creator: "test suite".to_string(), comment: "test blueprint".to_string(), report: PlanningReport::new(blueprint_id), + nexus_generation: *Generation::new(), }; let rack = datastore @@ -2280,6 +2288,7 @@ mod test { slot: 0, transit_ips: vec![], }, + nexus_generation: *Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, @@ -2298,6 +2307,7 @@ mod test { internal_dns_version: *Generation::new(), external_dns_version: *Generation::new(), target_release_minimum_generation: *Generation::new(), + nexus_generation: *Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: *Generation::new(), @@ -2420,6 +2430,7 @@ mod test { slot: 0, transit_ips: vec![], }, + nexus_generation: *Generation::new(), }, ), image_source: BlueprintZoneImageSource::InstallDataset, @@ -2440,6 +2451,7 @@ mod test { internal_dns_version: *Generation::new(), external_dns_version: *Generation::new(), target_release_minimum_generation: *Generation::new(), + nexus_generation: *Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: *Generation::new(), diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index 31f2111a0c8..399a595e9f8 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++ b/nexus/db-queries/src/db/datastore/vpc.rs @@ -3330,12 +3330,16 @@ mod tests { ) .expect("ensured disks"); } + let external_tls = false; + let external_dns_servers = vec![]; + let nexus_generation = builder.parent_blueprint().nexus_generation; builder .sled_add_zone_nexus_with_config( sled_ids[2], - false, - Vec::new(), + 
external_tls, + external_dns_servers, BlueprintZoneImageSource::InstallDataset, + nexus_generation, ) .expect("added nexus to third sled"); builder.build() @@ -3405,12 +3409,17 @@ mod tests { ) .expect("created blueprint builder"); for &sled_id in &sled_ids { + let external_tls = false; + let external_dns_servers = vec![]; + let nexus_generation = + builder.parent_blueprint().nexus_generation; builder .sled_add_zone_nexus_with_config( sled_id, - false, - Vec::new(), + external_tls, + external_dns_servers, BlueprintZoneImageSource::InstallDataset, + nexus_generation, ) .expect("added nexus to third sled"); } diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index 7d4daff9d2d..1c465eef732 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -1968,6 +1968,8 @@ table! { cockroachdb_setting_preserve_downgrade -> Nullable, target_release_minimum_generation -> Int8, + + nexus_generation -> Int8, } } @@ -2068,6 +2070,7 @@ table! { filesystem_pool -> Uuid, image_source -> crate::enums::BpZoneImageSourceEnum, image_artifact_sha256 -> Nullable, + nexus_generation -> Nullable, } } diff --git a/nexus/reconfigurator/blippy/src/blippy.rs b/nexus/reconfigurator/blippy/src/blippy.rs index 07192c7f276..dbcd4858570 100644 --- a/nexus/reconfigurator/blippy/src/blippy.rs +++ b/nexus/reconfigurator/blippy/src/blippy.rs @@ -14,6 +14,7 @@ use nexus_types::inventory::ZpoolName; use omicron_common::address::DnsSubnet; use omicron_common::address::Ipv6Subnet; use omicron_common::address::SLED_PREFIX; +use omicron_common::api::external::Generation; use omicron_common::api::external::MacAddr; use omicron_common::disk::DatasetKind; use omicron_common::disk::M2Slot; @@ -193,6 +194,12 @@ pub enum SledKind { version: BlueprintArtifactVersion, hash: ArtifactHash, }, + /// Nexus zones with the same generation have different image sources. 
+ NexusZoneGenerationImageSourceMismatch { + zone1: BlueprintZoneConfig, + zone2: BlueprintZoneConfig, + generation: Generation, + }, } impl fmt::Display for SledKind { @@ -415,6 +422,18 @@ impl fmt::Display for SledKind { (version {version}, hash {hash})", ) } + SledKind::NexusZoneGenerationImageSourceMismatch { + zone1, + zone2, + generation, + } => { + write!( + f, + "Nexus zones {} and {} both have generation {generation} but \ + different image sources ({:?} vs {:?})", + zone1.id, zone2.id, zone1.image_source, zone2.image_source, + ) + } } } } diff --git a/nexus/reconfigurator/blippy/src/checks.rs b/nexus/reconfigurator/blippy/src/checks.rs index 3894ae335b3..264e8ee5d02 100644 --- a/nexus/reconfigurator/blippy/src/checks.rs +++ b/nexus/reconfigurator/blippy/src/checks.rs @@ -21,6 +21,7 @@ use nexus_types::deployment::blueprint_zone_type; use omicron_common::address::DnsSubnet; use omicron_common::address::Ipv6Subnet; use omicron_common::address::SLED_PREFIX; +use omicron_common::api::external::Generation; use omicron_common::disk::DatasetKind; use omicron_common::disk::M2Slot; use omicron_uuid_kinds::MupdateOverrideUuid; @@ -37,6 +38,7 @@ pub(crate) fn perform_all_blueprint_only_checks(blippy: &mut Blippy<'_>) { check_dataset_zpool_uniqueness(blippy); check_datasets(blippy); check_mupdate_override(blippy); + check_nexus_generation_consistency(blippy); } fn check_underlay_ips(blippy: &mut Blippy<'_>) { @@ -632,6 +634,55 @@ fn check_mupdate_override_host_phase_2_contents( } } +fn check_nexus_generation_consistency(blippy: &mut Blippy<'_>) { + use std::collections::HashMap; + + // Map from generation -> (sled_id, image_source, zone) + let mut generation_info: HashMap< + Generation, + Vec<(SledUuid, BlueprintZoneImageSource, &BlueprintZoneConfig)>, + > = HashMap::new(); + + // Collect all Nexus zones and their generations + for (sled_id, zone) in blippy + .blueprint() + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + { + if let 
BlueprintZoneType::Nexus(nexus) = &zone.zone_type { + generation_info.entry(nexus.nexus_generation).or_default().push(( + sled_id, + zone.image_source.clone(), + zone, + )); + } + } + + // Check each generation for image source consistency + for (generation, zones_with_gen) in generation_info { + if zones_with_gen.len() < 2 { + continue; // Only one zone with this generation, no consistency issue + } + + // Take the first zone as the reference + let (ref_sled_id, ref_image_source, ref_zone) = &zones_with_gen[0]; + + // Compare all other zones to the reference + for (_sled_id, image_source, zone) in &zones_with_gen[1..] { + if image_source != ref_image_source { + blippy.push_sled_note( + *ref_sled_id, + Severity::Fatal, + SledKind::NexusZoneGenerationImageSourceMismatch { + zone1: (*ref_zone).clone(), + zone2: (*zone).clone(), + generation, + }, + ); + } + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -1812,4 +1863,105 @@ mod tests { logctx.cleanup_successful(); } + + #[test] + fn test_nexus_generation_consistency() { + static TEST_NAME: &str = "test_nexus_generation_consistency"; + let logctx = test_setup_log(TEST_NAME); + let (_, mut blueprint) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME) + .nsleds(3) + .nexus_count(3) + .build(); + + // Find the Nexus zones + let ((sled1, zone1_id), (sled2, zone2_id)) = { + let nexus_zones: Vec<_> = blueprint + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .filter_map(|(sled_id, zone)| { + if matches!(zone.zone_type, BlueprintZoneType::Nexus(_)) { + Some((sled_id, zone)) + } else { + None + } + }) + .collect(); + + // Should have exactly 3 Nexus zones + assert_eq!(nexus_zones.len(), 3); + + // Modify two zones to have the same generation but different image sources + let (sled1, zone1) = nexus_zones[0]; + let (sled2, zone2) = nexus_zones[1]; + + ((sled1, zone1.id), (sled2, zone2.id)) + }; + + let generation = Generation::new(); + + let zone1 = { + // Find the zones in the blueprint and modify them 
+ let mut zone1_config = blueprint + .sleds + .get_mut(&sled1) + .unwrap() + .zones + .get_mut(&zone1_id) + .unwrap(); + + match &mut zone1_config.zone_type { + BlueprintZoneType::Nexus(nexus) => { + nexus.nexus_generation = generation; + } + _ => unreachable!("this is a Nexus zone"), + } + zone1_config.image_source = + BlueprintZoneImageSource::InstallDataset; + zone1_config.clone() + }; + + let zone2 = { + let mut zone2_config = blueprint + .sleds + .get_mut(&sled2) + .unwrap() + .zones + .get_mut(&zone2_id) + .unwrap(); + + match &mut zone2_config.zone_type { + BlueprintZoneType::Nexus(nexus) => { + nexus.nexus_generation = generation; + } + _ => unreachable!("this is a Nexus zone"), + } + zone2_config.image_source = BlueprintZoneImageSource::Artifact { + version: BlueprintArtifactVersion::Available { + version: "1.0.0".parse().unwrap(), + }, + hash: ArtifactHash([0; 32]), + }; + zone2_config.clone() + }; + + // Run blippy checks + let expected_notes = [Note { + severity: Severity::Fatal, + kind: Kind::Sled { + sled_id: sled1, + kind: SledKind::NexusZoneGenerationImageSourceMismatch { + zone1, + zone2, + generation, + }, + }, + }]; + + let report = + Blippy::new(&blueprint).into_report(BlippyReportSortKey::Kind); + eprintln!("{}", report.display()); + assert_eq!(report.notes(), &expected_notes); + + logctx.cleanup_successful(); + } } diff --git a/nexus/reconfigurator/cli-integration-tests/tests/integration/blueprint_edit.rs b/nexus/reconfigurator/cli-integration-tests/tests/integration/blueprint_edit.rs index fbfed8169eb..33301d9408b 100644 --- a/nexus/reconfigurator/cli-integration-tests/tests/integration/blueprint_edit.rs +++ b/nexus/reconfigurator/cli-integration-tests/tests/integration/blueprint_edit.rs @@ -116,7 +116,7 @@ async fn test_blueprint_edit(cptestctx: &ControlPlaneTestContext) { // Assemble state that we can load into reconfigurator-cli. 
let state1 = nexus_reconfigurator_preparation::reconfigurator_state_load( - &opctx, datastore, + &opctx, datastore, None, ) .await .expect("failed to assemble reconfigurator state"); diff --git a/nexus/reconfigurator/execution/src/dns.rs b/nexus/reconfigurator/execution/src/dns.rs index 2ce03d66421..3f6dc0e03a6 100644 --- a/nexus/reconfigurator/execution/src/dns.rs +++ b/nexus/reconfigurator/execution/src/dns.rs @@ -597,6 +597,7 @@ mod test { nic, external_tls, external_dns_servers, + nexus_generation: Generation::new(), }) } OmicronZoneType::Oximeter { address } => { @@ -720,6 +721,7 @@ mod test { internal_dns_version: initial_dns_generation, external_dns_version: Generation::new(), target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: Generation::new(), @@ -1527,7 +1529,7 @@ mod test { chicken_switches: PlannerChickenSwitches::default(), log, } - .build() + .build(None) .unwrap() .into_builder(); diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 0601c852fa7..08cce1a74d8 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -139,6 +139,22 @@ pub enum Error { AllocateInternalDnsSubnet(#[from] NoAvailableDnsSubnets), #[error("error allocating external networking resources")] AllocateExternalNetworking(#[from] ExternalNetworkingError), + #[error( + "mismatch while setting top-level nexus_generation for blueprint, \ + expected current value is {expected} but actual value is {actual}" + )] + NexusGenerationMismatch { expected: Generation, actual: Generation }, + #[error( + "mismatch while setting nexus_generation for a zone with an old image, \ + expected current value is {expected} but actual value is {actual}" + )] + 
OldImageNexusGenerationMismatch { expected: Generation, actual: Generation }, + #[error( + "mismatch while setting nexus_generation for a zone with a new image, \ + expected current value is {expected} (or that +1) but actual value is \ + {actual}" + )] + NewImageNexusGenerationMismatch { expected: Generation, actual: Generation }, #[error("can only have {INTERNAL_DNS_REDUNDANCY} internal DNS servers")] PolicySpecifiesTooManyInternalDnsServers, #[error("zone is already up-to-date and should not be updated")] @@ -357,6 +373,10 @@ pub(crate) enum Operation { current_generation: Generation, new_generation: Generation, }, + SetNexusGeneration { + current_generation: Generation, + new_generation: Generation, + }, SledNoopZoneImageSourcesUpdated { sled_id: SledUuid, count: usize, @@ -465,6 +485,13 @@ impl fmt::Display for Operation { {current_generation} to {new_generation}" ) } + Self::SetNexusGeneration { current_generation, new_generation } => { + write!( + f, + "updated nexus generation from \ + {current_generation} to {new_generation}" + ) + } } } } @@ -515,6 +542,7 @@ pub struct BlueprintBuilder<'a> { sled_editors: BTreeMap, cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade, target_release_minimum_generation: Generation, + nexus_generation: Generation, report: Option, creator: String, @@ -582,6 +610,7 @@ impl<'a> BlueprintBuilder<'a> { internal_dns_version: Generation::new(), external_dns_version: Generation::new(), target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), cockroachdb_fingerprint: String::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, @@ -663,6 +692,7 @@ impl<'a> BlueprintBuilder<'a> { pending_mgs_updates: parent_blueprint.pending_mgs_updates.clone(), target_release_minimum_generation: parent_blueprint .target_release_minimum_generation, + nexus_generation: parent_blueprint.nexus_generation, report: None, creator: creator.to_owned(), operations: 
Vec::new(), @@ -857,6 +887,7 @@ impl<'a> BlueprintBuilder<'a> { external_dns_version: self.input.external_dns_version(), target_release_minimum_generation: self .target_release_minimum_generation, + nexus_generation: self.nexus_generation, cockroachdb_fingerprint: self .input .cockroachdb_settings() @@ -1530,6 +1561,116 @@ impl<'a> BlueprintBuilder<'a> { Ok(Ensure::Added) } + // Determines TLS and DNS server configuration from existing Nexus zones. + // + // Returns `Some((external_tls, external_dns_servers))` if existing Nexus + // zones are found, or `None` if no existing Nexus zones exist. + fn determine_nexus_tls_dns_config(&self) -> Option<(bool, Vec)> { + self.parent_blueprint + .all_omicron_zones(BlueprintZoneDisposition::any) + .find_map(|(_, z)| match &z.zone_type { + BlueprintZoneType::Nexus(nexus) => Some(( + nexus.external_tls, + nexus.external_dns_servers.clone(), + )), + _ => None, + }) + } + + // Determines the appropriate generation number for a new Nexus zone. + // + // Returns `Some(generation)` if a generation can be determined from existing + // Nexus zones, or `None` if no existing Nexus zones exist. + // + // The logic is: + // - If any existing Nexus zone has the same image source, reuse its generation + // - Otherwise, use the highest existing generation + 1 + // - If no existing zones exist, return None + // + // This function also validates that the determined generation matches the + // top-level current blueprint generation. + fn determine_nexus_generation( + &self, + image_source: &BlueprintZoneImageSource, + ) -> Result, Error> { + // If any other Nexus in the blueprint has the same image source, + // use it. Otherwise, use the highest generation number + 1. + // + // TODO: This will check the parent blueprint, but perhaps should + // also be checking all "pending" updates in "sled_editors". 
+ // If we are adding "multiple new nexus zones" in a blueprint, + // they'll all happen to get a generation number equal to "the previous + // highest generation, plus 1". But if, for some weird reason, + // we added multiple Nexuses with different new image sources in a single + // blueprint, they'd also get assigned the same generation (which should + // be a bug). + // + // In the meantime: There is a blippy check to verify that all Nexus + // zones with the same generation have the same image source. + let mut highest_seen_generation = None; + let mut same_image_nexus_generation = None; + + for (zone, nexus) in self + .parent_blueprint + .all_omicron_zones(BlueprintZoneDisposition::any) + .filter_map(|(_, z)| match &z.zone_type { + BlueprintZoneType::Nexus(nexus) => Some((z, nexus)), + _ => None, + }) + { + if zone.image_source == *image_source { + // If the image matches exactly, use it. + same_image_nexus_generation = Some(nexus.nexus_generation); + break; + } else if let Some(gen) = highest_seen_generation { + // Otherwise, use the generation number if it's the highest + // we've seen + if nexus.nexus_generation > gen { + highest_seen_generation = Some(nexus.nexus_generation); + } + } else { + // Use it regardless if it's the first generation number we've + // seen + highest_seen_generation = Some(nexus.nexus_generation); + } + } + + let determined_generation = match same_image_nexus_generation { + Some(gen) => Some(gen), + None => highest_seen_generation.map(|gen| gen.next()), + }; + + // Validate that the determined generation matches the top-level current blueprint generation + if let Some(gen) = determined_generation { + let current_blueprint_gen = self.parent_blueprint.nexus_generation; + if same_image_nexus_generation.is_some() { + // Existing image - should either match the currently-used Nexus + // generation, or be part of a "generation + 1". 
+ let matches_current_nexus = current_blueprint_gen == gen; + let matches_next_nexus = current_blueprint_gen.next() == gen; + + if !matches_current_nexus && !matches_next_nexus { + return Err(Error::OldImageNexusGenerationMismatch { + expected: current_blueprint_gen, + actual: gen, + }); + } + } else { + // New image source - should be current blueprint generation + 1 + let expected_gen = current_blueprint_gen.next(); + if gen != expected_gen { + return Err(Error::NewImageNexusGenerationMismatch { + expected: expected_gen, + actual: gen, + }); + } + } + } + + Ok(determined_generation) + } + + /// Adds a nexus zone on this sled. pub fn sled_add_zone_nexus( &mut self, sled_id: SledUuid, @@ -1546,31 +1687,41 @@ impl<'a> BlueprintBuilder<'a> { // check that we're if this builder is being used to make such a change, // that change is also reflected here in a new zone. Perhaps these // settings should be part of `Policy` instead? - let (external_tls, external_dns_servers) = self - .parent_blueprint - .all_omicron_zones(BlueprintZoneDisposition::any) - .find_map(|(_, z)| match &z.zone_type { - BlueprintZoneType::Nexus(nexus) => Some(( - nexus.external_tls, - nexus.external_dns_servers.clone(), - )), - _ => None, - }) - .ok_or(Error::NoNexusZonesInParentBlueprint)?; + let (external_tls, external_dns_servers) = + match self.determine_nexus_tls_dns_config() { + Some(config) => config, + None => { + return Err(Error::NoNexusZonesInParentBlueprint); + } + }; + + let nexus_generation = + match self.determine_nexus_generation(&image_source)? { + Some(generation) => generation, + None => { + return Err(Error::NoNexusZonesInParentBlueprint); + } + }; + self.sled_add_zone_nexus_with_config( sled_id, external_tls, external_dns_servers, image_source, + nexus_generation, ) } + /// Add a Nexus zone on this sled with a specific configuration. 
+ /// + /// If possible, callers should prefer to use [Self::sled_add_zone_nexus] pub fn sled_add_zone_nexus_with_config( &mut self, sled_id: SledUuid, external_tls: bool, external_dns_servers: Vec, image_source: BlueprintZoneImageSource, + nexus_generation: Generation, ) -> Result<(), Error> { let nexus_id = self.rng.sled_rng(sled_id).next_zone(); let ExternalNetworkingChoice { @@ -1608,6 +1759,7 @@ impl<'a> BlueprintBuilder<'a> { nic, external_tls, external_dns_servers: external_dns_servers.clone(), + nexus_generation, }); let filesystem_pool = self.sled_select_zpool(sled_id, zone_type.kind())?; @@ -2182,6 +2334,32 @@ impl<'a> BlueprintBuilder<'a> { Ok(()) } + /// Get the value of `nexus_generation`. + pub fn nexus_generation(&self) -> Generation { + self.nexus_generation + } + + /// Given the current value of `nexus_generation`, set the new value for + /// this blueprint. + pub fn set_nexus_generation( + &mut self, + current_generation: Generation, + new_generation: Generation, + ) -> Result<(), Error> { + if self.nexus_generation != current_generation { + return Err(Error::NexusGenerationMismatch { + expected: current_generation, + actual: self.nexus_generation, + }); + } + self.nexus_generation = new_generation; + self.record_operation(Operation::SetNexusGeneration { + current_generation, + new_generation, + }); + Ok(()) + } + /// Allow a test to manually add an external DNS address, which could /// ordinarily only come from RSS. 
/// @@ -3682,4 +3860,453 @@ pub mod test { logctx.cleanup_successful(); } + + /// Test nexus generation assignment logic for new zones + #[test] + fn test_nexus_generation_assignment_new_generation() { + static TEST_NAME: &str = + "test_nexus_generation_assignment_new_generation"; + let logctx = test_setup_log(TEST_NAME); + let mut rng = SimRngState::from_seed(TEST_NAME); + + // Start with a system that has no Nexus zones + let (example_system, blueprint) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME) + .nexus_count(0) + .build(); + verify_blueprint(&blueprint); + + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint, + &example_system.input, + &example_system.collection, + "test", + rng.next_planner_rng(), + ) + .expect("failed to create builder"); + + // Get first sled + let sled_id = example_system + .input + .all_sled_ids(SledFilter::Commissioned) + .next() + .unwrap(); + let image_source = BlueprintZoneImageSource::InstallDataset; + + // Add first Nexus zone - should get generation 1 + builder + .sled_add_zone_nexus_with_config( + sled_id, + false, + vec![], + image_source.clone(), + builder.parent_blueprint().nexus_generation, + ) + .expect("failed to add nexus zone"); + + let blueprint1 = builder.build(); + verify_blueprint(&blueprint1); + + // Find the nexus zone and verify it has generation 1 + let nexus_zones: Vec<_> = blueprint1 + .all_omicron_zones(BlueprintZoneDisposition::any) + .filter_map(|(_, zone)| match &zone.zone_type { + BlueprintZoneType::Nexus(nexus) => Some(nexus), + _ => None, + }) + .collect(); + + assert_eq!(nexus_zones.len(), 1); + assert_eq!(nexus_zones[0].nexus_generation, Generation::new()); + + logctx.cleanup_successful(); + } + + /// Test that adding a Nexus zone with the same image source as an existing + /// Nexus zone re-uses the same generation number + #[test] + fn test_nexus_generation_assignment_same_image_reuse() { + static TEST_NAME: &str = + 
"test_nexus_generation_assignment_same_image_reuse"; + let logctx = test_setup_log(TEST_NAME); + let mut rng = SimRngState::from_seed(TEST_NAME); + + // Start with a system that has one Nexus zone + let (example_system, blueprint) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME) + .nexus_count(1) + .build(); + verify_blueprint(&blueprint); + + // Get the generation of the existing nexus zone + let existing_nexus_gen = blueprint + .all_omicron_zones(BlueprintZoneDisposition::any) + .find_map(|(_, zone)| match &zone.zone_type { + BlueprintZoneType::Nexus(nexus) => { + // We're gonna add a new Nexus with this source in a moment + // - we want to be sure this image_source matches. + assert_eq!( + zone.image_source, + BlueprintZoneImageSource::InstallDataset + ); + Some(nexus.nexus_generation) + } + _ => None, + }) + .expect("should have found existing nexus"); + + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint, + &example_system.input, + &example_system.collection, + "test", + rng.next_planner_rng(), + ) + .expect("failed to create builder"); + + // Get a different sled + let sled_ids: Vec<_> = example_system + .input + .all_sled_ids(SledFilter::Commissioned) + .collect(); + let second_sled_id = sled_ids[1]; + let image_source = BlueprintZoneImageSource::InstallDataset; + + // Add another Nexus zone with same image source - should reuse generation + builder + .sled_add_zone_nexus_with_config( + second_sled_id, + false, + vec![], + image_source.clone(), + builder.parent_blueprint().nexus_generation, + ) + .expect("failed to add nexus zone"); + + let blueprint2 = builder.build(); + verify_blueprint(&blueprint2); + + // Find all nexus zones and verify they have the same generation + let nexus_zones: Vec<_> = blueprint2 + .all_omicron_zones(BlueprintZoneDisposition::any) + .filter_map(|(_, zone)| match &zone.zone_type { + BlueprintZoneType::Nexus(nexus) => Some(nexus), + _ => None, + }) + .collect(); + + assert_eq!(nexus_zones.len(), 2); 
+ assert_eq!(nexus_zones[0].nexus_generation, existing_nexus_gen); + assert_eq!(nexus_zones[1].nexus_generation, existing_nexus_gen); + + logctx.cleanup_successful(); + } + + /// Test nexus generation assignment logic for different image sources + #[test] + fn test_nexus_generation_assignment_different_image_increment() { + static TEST_NAME: &str = + "test_nexus_generation_assignment_different_image_increment"; + let logctx = test_setup_log(TEST_NAME); + let mut rng = SimRngState::from_seed(TEST_NAME); + + // Start with a system that has one Nexus zone + let (example_system, blueprint) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME) + .nexus_count(1) + .build(); + verify_blueprint(&blueprint); + + // Get the generation of the existing nexus zone + let existing_nexus_gen = blueprint + .all_omicron_zones(BlueprintZoneDisposition::any) + .find_map(|(_, zone)| match &zone.zone_type { + BlueprintZoneType::Nexus(nexus) => { + assert_eq!( + zone.image_source, + BlueprintZoneImageSource::InstallDataset + ); + Some(nexus.nexus_generation) + } + _ => None, + }) + .expect("should have found existing nexus"); + + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint, + &example_system.input, + &example_system.collection, + "test", + rng.next_planner_rng(), + ) + .expect("failed to create builder"); + + // Get a different sled + let sled_ids: Vec<_> = example_system + .input + .all_sled_ids(SledFilter::Commissioned) + .collect(); + let second_sled_id = sled_ids[1]; + + // Use a different image source (artifact vs install dataset) + let different_image_source = BlueprintZoneImageSource::Artifact { + version: BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("1.2.3.4"), + }, + hash: ArtifactHash([0x42; 32]), + }; + + // Add another Nexus zone with different image source - should increment generation + builder + .sled_add_zone_nexus(second_sled_id, different_image_source.clone()) + .expect("failed to add nexus zone"); + + let 
blueprint2 = builder.build(); + verify_blueprint(&blueprint2); + + // Find all nexus zones and verify generations + let mut nexus_zones: Vec<_> = blueprint2 + .all_omicron_zones(BlueprintZoneDisposition::any) + .filter_map(|(_, zone)| match &zone.zone_type { + BlueprintZoneType::Nexus(nexus) => Some((zone, nexus)), + _ => None, + }) + .collect(); + + // Sort by generation to ensure predictable ordering + nexus_zones.sort_by_key(|(_, nexus)| nexus.nexus_generation); + + assert_eq!(nexus_zones.len(), 2); + assert_eq!(nexus_zones[0].1.nexus_generation, existing_nexus_gen); + assert_eq!( + nexus_zones[1].1.nexus_generation, + existing_nexus_gen.next() + ); + + // Verify image sources are different + assert_eq!( + nexus_zones[0].0.image_source, + BlueprintZoneImageSource::InstallDataset + ); + assert_eq!(nexus_zones[1].0.image_source, different_image_source); + + logctx.cleanup_successful(); + } + + /// Test nexus generation assignment logic with mixed old/new image sources + /// + /// Tests a scenario where we restore redundancy with existing image source + /// while also adding zones with new image source for upgrade. 
+ #[test] + fn test_nexus_generation_assignment_multiple_generations() { + static TEST_NAME: &str = + "test_nexus_generation_assignment_multiple_generations"; + let logctx = test_setup_log(TEST_NAME); + let mut rng = SimRngState::from_seed(TEST_NAME); + + // Start with a system with one Nexus zone using the install dataset as an image source + let (example_system, blueprint) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME) + .nsleds(3) + .nexus_count(1) + .build(); + verify_blueprint(&blueprint); + + // Get the existing nexus zone's generation (should be generation 1) + let existing_nexus_gen = blueprint + .all_omicron_zones(BlueprintZoneDisposition::any) + .find_map(|(_, zone)| match &zone.zone_type { + BlueprintZoneType::Nexus(nexus) => Some(nexus.nexus_generation), + _ => None, + }) + .expect("should have found existing nexus"); + + let mut builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint, + &example_system.input, + &example_system.collection, + "test", + rng.next_planner_rng(), + ) + .expect("failed to create builder"); + + let sled_ids: Vec<_> = example_system + .input + .all_sled_ids(SledFilter::Commissioned) + .collect(); + + // Define image sources: A (same as existing Nexus) and B (new) + let image_source_a = BlueprintZoneImageSource::InstallDataset; + let image_source_b = BlueprintZoneImageSource::Artifact { + version: BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("2.0.0"), + }, + hash: ArtifactHash([0x11; 32]), + }; + + // In a single BlueprintBuilder step, add: + // 1. One zone with image source A (should reuse existing generation) + // 2. 
One zone with image source B (should get existing generation + 1) + builder + .sled_add_zone_nexus(sled_ids[1], image_source_a.clone()) + .expect("failed to add nexus zone with image source A"); + builder + .sled_add_zone_nexus(sled_ids[2], image_source_b.clone()) + .expect("failed to add nexus zone with image source B"); + + let blueprint2 = builder.build(); + verify_blueprint(&blueprint2); + + // Collect all nexus zones and organize by image source + let mut nexus_by_image: std::collections::HashMap< + BlueprintZoneImageSource, + Vec, + > = std::collections::HashMap::new(); + + for (_, zone) in + blueprint2.all_omicron_zones(BlueprintZoneDisposition::any) + { + if let BlueprintZoneType::Nexus(nexus) = &zone.zone_type { + nexus_by_image + .entry(zone.image_source.clone()) + .or_insert_with(Vec::new) + .push(nexus.nexus_generation); + } + } + + // Should have 2 image sources now + assert_eq!(nexus_by_image.len(), 2); + + // Image source A should have 2 zones (original + new) with same generation + let image_a_gens = nexus_by_image.get(&image_source_a).unwrap(); + assert_eq!(image_a_gens.len(), 2); + assert_eq!(image_a_gens[0], existing_nexus_gen); + assert_eq!(image_a_gens[1], existing_nexus_gen); + + // Image source B should have 1 zone with next generation + let image_b_gens = nexus_by_image.get(&image_source_b).unwrap(); + assert_eq!(image_b_gens.len(), 1); + assert_eq!(image_b_gens[0], existing_nexus_gen.next()); + + logctx.cleanup_successful(); + } + + /// Test that the validation which normally occurs as a part of + /// "sled_add_zone_nexus" - namely, the invocation of + /// "determine_nexus_generation" - throws expected errors when the + /// "next Nexus zone" generation does not match the parent blueprint's + /// value of "nexus generation". 
+ #[test] + fn test_nexus_generation_blueprint_validation() { + static TEST_NAME: &str = "test_nexus_generation_blueprint_validation"; + let logctx = test_setup_log(TEST_NAME); + let mut rng = SimRngState::from_seed(TEST_NAME); + + // Start with a system that has one Nexus zone + let (example_system, mut blueprint) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME) + .nexus_count(1) + .build(); + verify_blueprint(&blueprint); + + // Manually modify the blueprint to create a mismatch: + // Set the top-level nexus_generation to 2, but keep the zone generation at 1 + blueprint.nexus_generation = Generation::new().next(); + + let builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint, + &example_system.input, + &example_system.collection, + "test", + rng.next_planner_rng(), + ) + .expect("failed to create builder"); + + let image_source = BlueprintZoneImageSource::InstallDataset; // Same as existing + + // Try to add another Nexus zone with same image source + // This should fail because existing zone has generation 1 but blueprint has generation 2 + let result = builder.determine_nexus_generation(&image_source); + + match result { + Err(Error::OldImageNexusGenerationMismatch { + expected, + actual, + }) => { + assert_eq!(expected, Generation::new().next()); // Blueprint generation + assert_eq!(actual, Generation::new()); // Zone generation + } + other => panic!( + "Expected OldImageNexusGenerationMismatch error, got: {:?}", + other + ), + } + + logctx.cleanup_successful(); + } + + /// Test nexus generation validation for new image source + #[test] + fn test_nexus_generation_blueprint_validation_new_image() { + static TEST_NAME: &str = + "test_nexus_generation_blueprint_validation_new_image"; + let logctx = test_setup_log(TEST_NAME); + let mut rng = SimRngState::from_seed(TEST_NAME); + + // Start with a system that has one Nexus zone + let (example_system, mut blueprint) = + ExampleSystemBuilder::new(&logctx.log, TEST_NAME) + .nexus_count(1) + 
.build(); + verify_blueprint(&blueprint); + + // The zone has generation 1 and blueprint has generation 1 + // Now modify the blueprint generation to be different from what + // the new image source logic would expect + blueprint.nexus_generation = Generation::new().next().next(); // Set to generation 3 + + let builder = BlueprintBuilder::new_based_on( + &logctx.log, + &blueprint, + &example_system.input, + &example_system.collection, + "test", + rng.next_planner_rng(), + ) + .expect("failed to create builder"); + + // Use a different image source (this should get existing generation + 1 = 2) + let different_image_source = BlueprintZoneImageSource::Artifact { + version: BlueprintArtifactVersion::Available { + version: ArtifactVersion::new_const("2.0.0"), + }, + hash: ArtifactHash([0x42; 32]), + }; + + // Try to add a Nexus zone with different image source + // This should fail because the calculated generation (2) doesn't match blueprint generation + 1 (4) + let result = + builder.determine_nexus_generation(&different_image_source); + + match result { + Err(Error::NewImageNexusGenerationMismatch { + expected, + actual, + }) => { + assert_eq!(expected, Generation::new().next().next().next()); // Blueprint generation + 1 = 4 + assert_eq!(actual, Generation::new().next()); // Calculated generation = 2 + } + other => panic!( + "Expected NewImageNexusGenerationMismatch error, got: {:?}", + other + ), + } + + logctx.cleanup_successful(); + } } diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs index ce793980b2c..783bde303ff 100644 --- a/nexus/reconfigurator/planning/src/example.rs +++ b/nexus/reconfigurator/planning/src/example.rs @@ -480,12 +480,17 @@ impl ExampleSystemBuilder { for _ in 0..nexus_count .on(discretionary_ix, discretionary_sled_count) { + let external_tls = false; + let external_dns_servers = vec![]; + let nexus_generation = + builder.parent_blueprint().nexus_generation; builder 
.sled_add_zone_nexus_with_config( sled_id, - false, - vec![], + external_tls, + external_dns_servers, image_source.clone(), + nexus_generation, ) .unwrap(); } @@ -547,6 +552,24 @@ impl ExampleSystemBuilder { } let blueprint = builder.build(); + + // Find the first Nexus zone to use as the current Nexus zone ID + let current_nexus_zone_id = blueprint + .sleds + .values() + .flat_map(|sled_cfg| sled_cfg.zones.iter()) + .find_map(|zone| match &zone.zone_type { + nexus_types::deployment::BlueprintZoneType::Nexus(_) => { + Some(zone.id) + } + _ => None, + }); + + // Set the current Nexus zone ID if we found one + if let Some(nexus_zone_id) = current_nexus_zone_id { + input_builder.set_current_nexus_zone_id(Some(nexus_zone_id)); + } + for sled_cfg in blueprint.sleds.values() { for zone in sled_cfg.zones.iter() { let service_id = zone.id; diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 505646d8458..021465c7924 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -31,6 +31,7 @@ use nexus_types::deployment::BlueprintPhysicalDiskDisposition; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneImageSource; +use nexus_types::deployment::BlueprintZoneType; use nexus_types::deployment::CockroachDbClusterVersion; use nexus_types::deployment::CockroachDbPreserveDowngrade; use nexus_types::deployment::CockroachDbSettings; @@ -42,9 +43,10 @@ use nexus_types::deployment::SledFilter; use nexus_types::deployment::TufRepoContentsError; use nexus_types::deployment::ZpoolFilter; use nexus_types::deployment::{ - CockroachdbUnsafeToShutdown, PlanningAddStepReport, - PlanningCockroachdbSettingsStepReport, PlanningDecommissionStepReport, - PlanningExpungeStepReport, PlanningMgsUpdatesStepReport, + CockroachdbUnsafeToShutdown, NexusGenerationBumpWaitingOn, + PlanningAddStepReport, 
PlanningCockroachdbSettingsStepReport, + PlanningDecommissionStepReport, PlanningExpungeStepReport, + PlanningMgsUpdatesStepReport, PlanningNexusGenerationBumpReport, PlanningNoopImageSourceStepReport, PlanningReport, PlanningZoneUpdatesStepReport, ZoneAddWaitingOn, ZoneUnsafeToShutdown, ZoneUpdatesWaitingOn, @@ -53,6 +55,7 @@ use nexus_types::external_api::views::PhysicalDiskPolicy; use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledState; use nexus_types::inventory::Collection; +use omicron_common::api::external::Generation; use omicron_common::disk::M2Slot; use omicron_common::policy::BOUNDARY_NTP_REDUNDANCY; use omicron_common::policy::COCKROACHDB_REDUNDANCY; @@ -111,6 +114,40 @@ const NUM_CONCURRENT_MGS_UPDATES: usize = 1; /// A receipt that `check_input_validity` has been run prior to planning. struct InputChecked; +// Details of why a zone has not yet propagated from blueprint to sled inventory +#[derive(Debug)] +#[expect(dead_code)] +struct ZonePropagationIncomplete<'a> { + zone_id: OmicronZoneUuid, + zone_kind: ZoneKind, + reason: ZonePropagationStatus<'a>, +} + +#[derive(Debug)] +#[expect(dead_code)] +enum ZonePropagationStatus<'a> { + // The current blueprint and the sled inventory disagree + // about the image source for a zone. + // + // This can mean that the sled inventory is out-of-date, or + // that a different blueprint has been applied. + ImageSourceMismatch { + bp_image_source: &'a BlueprintZoneImageSource, + inv_image_source: &'a OmicronZoneImageSource, + }, + // Although this zone appears in the blueprint, it does + // not exist on the sled's inventory. 
+ MissingInInventory { + bp_image_source: &'a BlueprintZoneImageSource, + }, + // The last reconciliation attempt for this zone failed + ReconciliationError { + bp_image_source: &'a BlueprintZoneImageSource, + inv_image_source: &'a OmicronZoneImageSource, + message: &'a str, + }, +} + pub struct Planner<'a> { log: Logger, input: &'a PlanningInput, @@ -232,6 +269,10 @@ impl<'a> Planner<'a> { self.do_plan_zone_updates(&mgs_updates)? }; + // We may need to bump the top-level Nexus generation number + // to update Nexus zones. + let nexus_generation_bump = self.do_plan_nexus_generation_update()?; + // CockroachDB settings aren't dependent on zones, so they can be // planned independently of the rest of the system. let cockroachdb_settings = self.do_plan_cockroachdb_settings(); @@ -245,6 +286,7 @@ impl<'a> Planner<'a> { add, mgs_updates, zone_updates, + nexus_generation_bump, cockroachdb_settings, }) } @@ -975,55 +1017,110 @@ impl<'a> Planner<'a> { DiscretionaryOmicronZone::Nexus, DiscretionaryOmicronZone::Oximeter, ] { - let num_zones_to_add = - self.num_additional_zones_needed(zone_kind, report); - if num_zones_to_add == 0 { - continue; - } - // We need to add at least one zone; construct our `zone_placement` - // (or reuse the existing one if a previous loop iteration already - // created it). - let zone_placement = zone_placement.get_or_insert_with(|| { - // This constructs a picture of the sleds as we currently - // understand them, as far as which sleds have discretionary - // zones. This will remain valid as we loop through the - // `zone_kind`s in this function, as any zone additions will - // update the `zone_placement` heap in-place. 
- let current_discretionary_zones = self - .input - .all_sled_resources(SledFilter::Discretionary) - .filter(|(sled_id, _)| { - !report.sleds_waiting_for_ntp_zone.contains(&sled_id) - }) - .map(|(sled_id, sled_resources)| { - OmicronZonePlacementSledState { - sled_id, - num_zpools: sled_resources - .all_zpools(ZpoolFilter::InService) - .count(), - discretionary_zones: self - .blueprint - .current_sled_zones( - sled_id, - BlueprintZoneDisposition::is_in_service, - ) - .filter_map(|zone| { - DiscretionaryOmicronZone::from_zone_type( - &zone.zone_type, - ) - }) - .collect(), + // Our goal here is to make sure that if we have less redundancy for + // discretionary zones than needed, we deploy additional zones. + // + // For most zone types, we only care about the total count of that + // kind of zone, regardless of image. In contrast, for Nexus, we may + // need to reach a minimum redundancy count for multiple zone images + // (new and old) during a handoff. + let image_sources = match zone_kind { + DiscretionaryOmicronZone::Nexus => { + let old_image = self + .input + .old_repo() + .description() + .zone_image_source(zone_kind.into())?; + let new_image = self + .input + .tuf_repo() + .description() + .zone_image_source(zone_kind.into())?; + let our_image = self.lookup_current_nexus_image(); + + let mut images = vec![]; + if old_image != new_image { + // We may still want to deploy the old image alongside + // the new image: if we're running the "old version of a + // Nexus" currently, we need to ensure we have + // redundancy before the handoff completes. + if our_image.as_ref() != Some(&new_image) { + images.push(old_image); } - }); - OmicronZonePlacement::new(current_discretionary_zones) - }); - self.add_discretionary_zones( - zone_placement, - zone_kind, - num_zones_to_add, - mgs_updates, - report, - )?; + // If there is a new image for us to use, deploy it + // immediately. The new Nexus will hang around mostly + // idle until handoff is ready. 
+ images.push(new_image.clone()); + } else { + // If there is no new image to use, use the old image. + images.push(old_image); + } + + assert!(!images.is_empty()); + images + } + _ => { + vec![self.image_source_for_new_zone( + zone_kind.into(), + mgs_updates, + )?] + } + }; + + for image_source in image_sources { + let num_zones_to_add = self.num_additional_zones_needed( + zone_kind, + &image_source, + report, + ); + if num_zones_to_add == 0 { + continue; + } + // We need to add at least one zone; construct our `zone_placement` + // (or reuse the existing one if a previous loop iteration already + // created it). + let zone_placement = zone_placement.get_or_insert_with(|| { + // This constructs a picture of the sleds as we currently + // understand them, as far as which sleds have discretionary + // zones. This will remain valid as we loop through the + // `zone_kind`s in this function, as any zone additions will + // update the `zone_placement` heap in-place. + let current_discretionary_zones = self + .input + .all_sled_resources(SledFilter::Discretionary) + .filter(|(sled_id, _)| { + !report.sleds_waiting_for_ntp_zone.contains(&sled_id) + }) + .map(|(sled_id, sled_resources)| { + OmicronZonePlacementSledState { + sled_id, + num_zpools: sled_resources + .all_zpools(ZpoolFilter::InService) + .count(), + discretionary_zones: self + .blueprint + .current_sled_zones( + sled_id, + BlueprintZoneDisposition::is_in_service, + ) + .filter_map(|zone| { + DiscretionaryOmicronZone::from_zone_type( + &zone.zone_type, + ) + }) + .collect(), + } + }); + OmicronZonePlacement::new(current_discretionary_zones) + }); + self.add_discretionary_zones( + zone_placement, + zone_kind, + num_zones_to_add, + image_source, + report, + )?; + } } Ok(()) @@ -1033,7 +1130,8 @@ impl<'a> Planner<'a> { /// additional zones needed of the given `zone_kind` to satisfy the policy. 
fn num_additional_zones_needed( &mut self, - zone_kind: DiscretionaryOmicronZone, + discretionary_zone_kind: DiscretionaryOmicronZone, + image_source: &BlueprintZoneImageSource, report: &mut PlanningAddStepReport, ) -> usize { // Count the number of `kind` zones on all in-service sleds. This @@ -1042,7 +1140,7 @@ impl<'a> Planner<'a> { // decommissioned. let mut num_existing_kind_zones = 0; for sled_id in self.input.all_sled_ids(SledFilter::InService) { - let zone_kind = ZoneKind::from(zone_kind); + let zone_kind = ZoneKind::from(discretionary_zone_kind); // Internal DNS is special: if we have an expunged internal DNS zone // that might still be running, we want to count it here: we can't @@ -1057,11 +1155,20 @@ impl<'a> Planner<'a> { num_existing_kind_zones += self .blueprint .current_sled_zones(sled_id, disposition_filter) - .filter(|z| z.zone_type.kind() == zone_kind) + .filter(|z| { + let matches_kind = z.zone_type.kind() == zone_kind; + let matches_image = z.image_source == *image_source; + match discretionary_zone_kind { + DiscretionaryOmicronZone::Nexus => { + matches_kind && matches_image + } + _ => matches_kind, + } + }) .count(); } - let target_count = match zone_kind { + let target_count = match discretionary_zone_kind { DiscretionaryOmicronZone::BoundaryNtp => { self.input.target_boundary_ntp_zone_count() } @@ -1103,7 +1210,7 @@ impl<'a> Planner<'a> { target_count.saturating_sub(num_existing_kind_zones); if num_zones_to_add == 0 { report.sufficient_zones_exist( - ZoneKind::from(zone_kind).report_str(), + ZoneKind::from(discretionary_zone_kind).report_str(), target_count, num_existing_kind_zones, ); @@ -1121,7 +1228,7 @@ impl<'a> Planner<'a> { zone_placement: &mut OmicronZonePlacement, kind: DiscretionaryOmicronZone, num_zones_to_add: usize, - mgs_updates: &PlanningMgsUpdatesStepReport, + image_source: BlueprintZoneImageSource, report: &mut PlanningAddStepReport, ) -> Result<(), Error> { for i in 0..num_zones_to_add { @@ -1141,46 +1248,45 @@ impl<'a> 
Planner<'a> { } }; - let image_source = - self.image_source_for_new_zone(kind.into(), mgs_updates)?; + let image = image_source.clone(); match kind { DiscretionaryOmicronZone::BoundaryNtp => { self.blueprint.sled_promote_internal_ntp_to_boundary_ntp( - sled_id, - image_source, + sled_id, image, )? } - DiscretionaryOmicronZone::Clickhouse => self - .blueprint - .sled_add_zone_clickhouse(sled_id, image_source)?, + DiscretionaryOmicronZone::Clickhouse => { + self.blueprint.sled_add_zone_clickhouse(sled_id, image)? + } DiscretionaryOmicronZone::ClickhouseKeeper => self .blueprint - .sled_add_zone_clickhouse_keeper(sled_id, image_source)?, + .sled_add_zone_clickhouse_keeper(sled_id, image)?, DiscretionaryOmicronZone::ClickhouseServer => self .blueprint - .sled_add_zone_clickhouse_server(sled_id, image_source)?, - DiscretionaryOmicronZone::CockroachDb => self - .blueprint - .sled_add_zone_cockroachdb(sled_id, image_source)?, + .sled_add_zone_clickhouse_server(sled_id, image)?, + DiscretionaryOmicronZone::CockroachDb => { + self.blueprint.sled_add_zone_cockroachdb(sled_id, image)? + } DiscretionaryOmicronZone::CruciblePantry => self .blueprint - .sled_add_zone_crucible_pantry(sled_id, image_source)?, - DiscretionaryOmicronZone::InternalDns => self - .blueprint - .sled_add_zone_internal_dns(sled_id, image_source)?, - DiscretionaryOmicronZone::ExternalDns => self - .blueprint - .sled_add_zone_external_dns(sled_id, image_source)?, + .sled_add_zone_crucible_pantry(sled_id, image)?, + DiscretionaryOmicronZone::InternalDns => { + self.blueprint.sled_add_zone_internal_dns(sled_id, image)? + } + DiscretionaryOmicronZone::ExternalDns => { + self.blueprint.sled_add_zone_external_dns(sled_id, image)? + } DiscretionaryOmicronZone::Nexus => { - self.blueprint.sled_add_zone_nexus(sled_id, image_source)? + self.blueprint.sled_add_zone_nexus(sled_id, image)? + } + DiscretionaryOmicronZone::Oximeter => { + self.blueprint.sled_add_zone_oximeter(sled_id, image)? 
} - DiscretionaryOmicronZone::Oximeter => self - .blueprint - .sled_add_zone_oximeter(sled_id, image_source)?, }; report.discretionary_zone_placed( sled_id, ZoneKind::from(kind).report_str(), + &image_source, ); } @@ -1262,13 +1368,9 @@ impl<'a> Planner<'a> { Ok(PlanningMgsUpdatesStepReport::new(pending_updates)) } - /// Update at most one existing zone to use a new image source. - fn do_plan_zone_updates( - &mut self, - mgs_updates: &PlanningMgsUpdatesStepReport, - ) -> Result { - let mut report = PlanningZoneUpdatesStepReport::new(); - + fn get_zones_not_yet_propagated_to_inventory( + &self, + ) -> Vec> { // We are only interested in non-decommissioned sleds. let sleds = self .input @@ -1283,31 +1385,7 @@ impl<'a> Planner<'a> { .map(|(z, sa_result)| (z.id, (&z.image_source, sa_result))) .collect::>(); - #[derive(Debug)] - #[expect(dead_code)] - struct ZoneCurrentlyUpdating<'a> { - zone_id: OmicronZoneUuid, - zone_kind: ZoneKind, - reason: UpdatingReason<'a>, - } - - #[derive(Debug)] - #[expect(dead_code)] - enum UpdatingReason<'a> { - ImageSourceMismatch { - bp_image_source: &'a BlueprintZoneImageSource, - inv_image_source: &'a OmicronZoneImageSource, - }, - MissingInInventory { - bp_image_source: &'a BlueprintZoneImageSource, - }, - ReconciliationError { - bp_image_source: &'a BlueprintZoneImageSource, - inv_image_source: &'a OmicronZoneImageSource, - message: &'a str, - }, - } - + let mut updating = vec![]; for &sled_id in &sleds { // Build a list of zones currently in the blueprint but where // inventory has a mismatch or does not know about the zone. @@ -1315,7 +1393,7 @@ impl<'a> Planner<'a> { // What about the case where a zone is in inventory but not in the // blueprint? See // https://github.com/oxidecomputer/omicron/issues/8589. 
- let zones_currently_updating = self + let mut zones_currently_updating = self .blueprint .current_sled_zones( sled_id, @@ -1338,13 +1416,14 @@ impl<'a> Planner<'a> { ConfigReconcilerInventoryResult::Ok, )) => { // The inventory and blueprint image sources differ. - Some(ZoneCurrentlyUpdating { + Some(ZonePropagationIncomplete { zone_id: zone.id, zone_kind: zone.kind(), - reason: UpdatingReason::ImageSourceMismatch { - bp_image_source: &zone.image_source, - inv_image_source, - }, + reason: + ZonePropagationStatus::ImageSourceMismatch { + bp_image_source: &zone.image_source, + inv_image_source, + }, }) } Some(( @@ -1354,40 +1433,60 @@ impl<'a> Planner<'a> { // The inventory reports this zone but there was an // error reconciling it (most likely an error // starting the zone). - Some(ZoneCurrentlyUpdating { + Some(ZonePropagationIncomplete { zone_id: zone.id, zone_kind: zone.kind(), - reason: UpdatingReason::ReconciliationError { - bp_image_source: &zone.image_source, - inv_image_source, - message, - }, + reason: + ZonePropagationStatus::ReconciliationError { + bp_image_source: &zone.image_source, + inv_image_source, + message, + }, }) } None => { // The blueprint has a zone that inventory does not have. - Some(ZoneCurrentlyUpdating { + Some(ZonePropagationIncomplete { zone_id: zone.id, zone_kind: zone.kind(), - reason: UpdatingReason::MissingInInventory { - bp_image_source: &zone.image_source, - }, + reason: + ZonePropagationStatus::MissingInInventory { + bp_image_source: &zone.image_source, + }, }) } } }) .collect::>(); + updating.append(&mut zones_currently_updating); + } + updating + } - if !zones_currently_updating.is_empty() { - info!( - self.log, "some zones not yet up-to-date"; - "sled_id" => %sled_id, - "zones_currently_updating" => ?zones_currently_updating, - ); - return Ok(report); - } + /// Update at most one existing zone to use a new image source. 
+ fn do_plan_zone_updates( + &mut self, + mgs_updates: &PlanningMgsUpdatesStepReport, + ) -> Result { + let mut report = PlanningZoneUpdatesStepReport::new(); + + let zones_currently_updating = + self.get_zones_not_yet_propagated_to_inventory(); + if !zones_currently_updating.is_empty() { + info!( + self.log, "some zones not yet up-to-date"; + "zones_currently_updating" => ?zones_currently_updating, + ); + return Ok(report); } + // We are only interested in non-decommissioned sleds. + let sleds = self + .input + .all_sleds(SledFilter::Commissioned) + .map(|(id, _details)| id) + .collect::>(); + // Find out of date zones, as defined by zones whose image source does // not match what it should be based on our current target release. let target_release = self.input.tuf_repo().description(); @@ -1439,10 +1538,7 @@ impl<'a> Planner<'a> { if !self.can_zone_be_shut_down_safely(zone, &mut report) { return false; } - match self.is_zone_ready_for_update( - zone.zone_type.kind(), - mgs_updates, - ) { + match self.is_zone_ready_for_update(mgs_updates) { Ok(true) => true, Ok(false) => false, Err(err) => { @@ -1752,6 +1848,125 @@ impl<'a> Planner<'a> { Ok(reasons) } + // Determines whether or not the top-level "nexus_generation" + // value should be increased. + // + // Doing so will be a signal for all running Nexus instances at + // lower versions to start quiescing, and to perform handoff. + fn do_plan_nexus_generation_update( + &mut self, + ) -> Result { + let mut report = PlanningNexusGenerationBumpReport::new(); + + // Nexus can only be updated if all non-Nexus zones have been + // updated, i.e., their image source is an artifact from the new + // repo. + let new_repo = self.input.tuf_repo().description(); + + // If we don't actually have a TUF repo here, we can't do + // updates anyway; any return value is fine. 
+ if new_repo.tuf_repo().is_none() { + return Ok(report); + } + + // Check that all in-service zones (other than Nexus) on all + // sleds have an image source consistent with `new_repo`. + for sled_id in self.blueprint.sled_ids_with_zones() { + for z in self.blueprint.current_sled_zones( + sled_id, + BlueprintZoneDisposition::is_in_service, + ) { + let kind = z.zone_type.kind(); + if kind != ZoneKind::Nexus + && z.image_source != new_repo.zone_image_source(kind)? + { + report.set_waiting_on( + NexusGenerationBumpWaitingOn::NonNexusZoneUpdate, + ); + return Ok(report); + } + } + } + + // Confirm that we have new nexuses at the desired generation number + let current_generation = self.blueprint.nexus_generation(); + let proposed_generation = self.blueprint.nexus_generation().next(); + let mut out_of_date_nexuses_at_current_gen = 0; + let mut nexuses_at_next_gen = 0; + for sled_id in self.blueprint.sled_ids_with_zones() { + for z in self.blueprint.current_sled_zones( + sled_id, + BlueprintZoneDisposition::is_in_service, + ) { + if let BlueprintZoneType::Nexus(nexus_zone) = &z.zone_type { + if nexus_zone.nexus_generation == proposed_generation { + nexuses_at_next_gen += 1; + } + + if nexus_zone.nexus_generation == current_generation + && z.image_source + != new_repo.zone_image_source(z.zone_type.kind())? + { + out_of_date_nexuses_at_current_gen += 1; + } + } + } + } + + if out_of_date_nexuses_at_current_gen == 0 { + // If all the current-generation Nexuses are "up-to-date", then we may have + // just completed handoff successfully. In this case, there's nothing to report. + return Ok(report); + } else { + // If there aren't enough Nexuses at the next generation, quiescing could + // be a dangerous operation. Blueprint execution should be able to continue + // even if the new Nexuses haven't started, but to be conservative, we'll wait + // for the target count. 
+ if nexuses_at_next_gen < self.input.target_nexus_zone_count() { + report.set_waiting_on( + NexusGenerationBumpWaitingOn::NewNexusBringup, + ); + return Ok(report); + } + } + + // Confirm that all blueprint zones have propagated to inventory + let zones_currently_updating = + self.get_zones_not_yet_propagated_to_inventory(); + if !zones_currently_updating.is_empty() { + info!( + self.log, "some zones not yet up-to-date"; + "zones_currently_updating" => ?zones_currently_updating, + ); + report + .set_waiting_on(NexusGenerationBumpWaitingOn::ZonePropagation); + return Ok(report); + } + + // If we're here: + // - There's a new repo + // - The current generation of Nexuses are considered "out-of-date" + // - There are Nexuses running with "current generation + 1" + // - All non-Nexus zones have updated + // - All other blueprint zones have propagated to inventory + // + // If all of these are true, the "zone update" portion of the planner + // has completed, aside from Nexus, and we're ready for old Nexuses + // to start quiescing. + // + // Blueprint planning and execution will be able to continue past this + // point, for the purposes of restoring redundancy, expunging sleds, + // etc. However, making this commitment will also halt the creation of + // new sagas temporarily, as handoff from old to new Nexuses occurs. + self.blueprint.set_nexus_generation( + self.blueprint.nexus_generation(), + proposed_generation, + )?; + report.set_next_generation(proposed_generation); + + Ok(report) + } + fn do_plan_cockroachdb_settings( + &mut self, + ) -> PlanningCockroachdbSettingsStepReport { @@ -1852,63 +2067,67 @@ impl<'a> Planner<'a> { zone_kind: ZoneKind, mgs_updates: &PlanningMgsUpdatesStepReport, ) -> Result { - let source_repo = - if self.is_zone_ready_for_update(zone_kind, mgs_updates)? { - self.input.tuf_repo().description() - } else { - self.input.old_repo().description() - }; + let source_repo = if self.is_zone_ready_for_update(mgs_updates)? 
{ + self.input.tuf_repo().description() + } else { + self.input.old_repo().description() + }; source_repo.zone_image_source(zone_kind) } - /// Return `true` iff a zone of the given kind is ready to be updated; - /// i.e., its dependencies have been updated. + /// Return `true` iff a zone is ready to be updated; i.e., its dependencies + /// have been updated. fn is_zone_ready_for_update( &self, - zone_kind: ZoneKind, mgs_updates: &PlanningMgsUpdatesStepReport, ) -> Result { - // We return false regardless of `zone_kind` if there are still + // We return false for all zone kinds if there are still // pending updates for components earlier in the update ordering // than zones: RoT bootloader / RoT / SP / Host OS. if !mgs_updates.is_empty() { return Ok(false); } - match zone_kind { - ZoneKind::Nexus => { - // Nexus can only be updated if all non-Nexus zones have been - // updated, i.e., their image source is an artifact from the new - // repo. - let new_repo = self.input.tuf_repo().description(); - - // If we don't actually have a TUF repo here, we can't do - // updates anyway; any return value is fine. - if new_repo.tuf_repo().is_none() { - return Ok(false); + Ok(true) + } + + fn lookup_current_nexus_image(&self) -> Option { + // Get the current Nexus zone ID from the planning input + let current_nexus_zone_id = self.input.current_nexus_zone_id()?; + + // Look up our current Nexus zone in the blueprint to get its image + self.blueprint + .parent_blueprint() + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .find_map(|(_, blueprint_zone)| { + if blueprint_zone.id == current_nexus_zone_id { + Some(blueprint_zone.image_source.clone()) + } else { + None } + }) + } - // Check that all in-service zones (other than Nexus) on all - // sleds have an image source consistent with `new_repo`. 
- for sled_id in self.blueprint.sled_ids_with_zones() { - for z in self.blueprint.current_sled_zones( - sled_id, - BlueprintZoneDisposition::is_in_service, - ) { - let kind = z.zone_type.kind(); - if kind != ZoneKind::Nexus - && z.image_source - != new_repo.zone_image_source(kind)? - { - return Ok(false); + fn lookup_current_nexus_generation(&self) -> Option { + // Get the current Nexus zone ID from the planning input + let current_nexus_zone_id = self.input.current_nexus_zone_id()?; + + // Look up our current Nexus zone in the blueprint to get its generation + self.blueprint + .parent_blueprint() + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .find_map(|(_, blueprint_zone)| { + if blueprint_zone.id == current_nexus_zone_id { + match &blueprint_zone.zone_type { + BlueprintZoneType::Nexus(nexus_zone) => { + Some(nexus_zone.nexus_generation) } + _ => None, } + } else { + None } - - Ok(true) - } - _ => Ok(true), // other zone kinds have no special dependencies - } + }) } /// Return `true` iff we believe a zone can safely be shut down; e.g., any @@ -2089,6 +2308,47 @@ impl<'a> Planner<'a> { false } } + ZoneKind::Nexus => { + // Get the nexus_generation of the zone being considered for shutdown + let zone_nexus_generation = match &zone.zone_type { + BlueprintZoneType::Nexus(nexus_zone) => { + nexus_zone.nexus_generation + } + _ => unreachable!("zone kind is Nexus but type is not"), + }; + + let Some(current_gen) = self.lookup_current_nexus_generation() + else { + // If we don't know the current Nexus zone ID, or its + // generation, we can't perform the handoff safety check. + report.unsafe_zone( + zone, + Nexus { + zone_generation: zone_nexus_generation, + current_nexus_generation: None, + }, + ); + return false; + }; + + // It's only safe to shut down if handoff has occurred. + // + // That only happens when the current generation of Nexus (the + // one running right now) is greater than the zone we're + // considering expunging. 
+ if current_gen <= zone_nexus_generation { + report.unsafe_zone( + zone, + Nexus { + zone_generation: zone_nexus_generation, + current_nexus_generation: Some(current_gen), + }, + ); + return false; + } + + true + } _ => true, // other zone kinds have no special safety checks } } @@ -5619,8 +5879,8 @@ pub(crate) mod test { /// Ensure that dependent zones (here just Crucible Pantry) are updated /// before Nexus. #[test] - fn test_update_crucible_pantry() { - static TEST_NAME: &str = "update_crucible_pantry"; + fn test_update_crucible_pantry_before_nexus() { + static TEST_NAME: &str = "update_crucible_pantry_before_nexus"; let logctx = test_setup_log(TEST_NAME); let log = logctx.log.clone(); @@ -5727,18 +5987,18 @@ pub(crate) mod test { }; } - // Request another Nexus zone. - input_builder.policy_mut().target_nexus_zone_count = - input_builder.policy_mut().target_nexus_zone_count + 1; - let input = input_builder.build(); + // Nexus should deploy new zones, but keep the old ones running. + let expected_new_nexus_zones = + input_builder.policy_mut().target_nexus_zone_count; + example.input = input_builder.build(); - // Check that there is a new nexus zone that does *not* use the new - // artifact (since not all of its dependencies are updated yet). + // Check that there are new nexus zones deployed, though handoff is + // incomplete (since not all of its dependencies are updated yet). 
update_collection_from_blueprint(&mut example, &blueprint1); let blueprint2 = Planner::new_based_on( log.clone(), &blueprint1, - &input, + &example.input, "test_blueprint3", &example.collection, PlannerRng::from_seed((TEST_NAME, "bp3")), @@ -5748,6 +6008,7 @@ pub(crate) mod test { .expect("can't re-plan for new Nexus zone"); { let summary = blueprint2.diff_since_blueprint(&blueprint1); + let mut modified_sleds = 0; for sled in summary.diff.sleds.modified_values_diff() { assert!(sled.zones.removed.is_empty()); assert_eq!(sled.zones.added.len(), 1); @@ -5756,11 +6017,10 @@ pub(crate) mod test { &added.zone_type, BlueprintZoneType::Nexus(_) )); - assert!(matches!( - &added.image_source, - BlueprintZoneImageSource::InstallDataset - )); + assert_eq!(&added.image_source, &image_source); + modified_sleds += 1; } + assert_eq!(modified_sleds, expected_new_nexus_zones); } // We should now have three sets of expunge/add iterations for the @@ -5772,7 +6032,7 @@ pub(crate) mod test { let blueprint = Planner::new_based_on( log.clone(), &parent, - &input, + &example.input, &blueprint_name, &example.collection, PlannerRng::from_seed((TEST_NAME, &blueprint_name)), @@ -5844,17 +6104,32 @@ pub(crate) mod test { .all_omicron_zones(BlueprintZoneDisposition::is_in_service) .filter(|(_, z)| is_old_nexus(z)) .count(), - NEXUS_REDUNDANCY + 1, + NEXUS_REDUNDANCY, + ); + assert_eq!( + blueprint8 + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .filter(|(_, z)| is_up_to_date_nexus(z)) + .count(), + NEXUS_REDUNDANCY, + ); + + // We have to pretend that we're running the "Newer Nexus" to shut down + // the old Nexuses. If we don't do this: it's as if handoff has not + // happened, and the old Nexuses cannot shut down. 
+ set_current_nexus_to_highest_generation( + &mut example.input, + &blueprint8, ); let mut parent = blueprint8; - for i in 9..=16 { - update_collection_from_blueprint(&mut example, &parent); + for i in 9..=12 { + update_collection_from_blueprint(&mut example, &parent); let blueprint_name = format!("blueprint{i}"); let blueprint = Planner::new_based_on( log.clone(), &parent, - &input, + &example.input, &blueprint_name, &example.collection, PlannerRng::from_seed((TEST_NAME, &blueprint_name)), @@ -5865,41 +6140,72 @@ pub(crate) mod test { { let summary = blueprint.diff_since_blueprint(&parent); + assert!(summary.has_changes(), "No changes at iteration {i}"); for sled in summary.diff.sleds.modified_values_diff() { - if i % 2 == 1 { - assert!(sled.zones.added.is_empty()); - assert!(sled.zones.removed.is_empty()); - } else { - assert!(sled.zones.removed.is_empty()); - assert_eq!(sled.zones.added.len(), 1); - let added = sled.zones.added.values().next().unwrap(); + assert!(sled.zones.added.is_empty()); + assert!(sled.zones.removed.is_empty()); + for modified_zone in sled.zones.modified_values_diff() { + // We're only modifying Nexus zones on the old image assert!(matches!( - &added.zone_type, + *modified_zone.zone_type.before, BlueprintZoneType::Nexus(_) )); - assert_eq!(added.image_source, image_source); + assert_eq!( + *modified_zone.image_source.before, + BlueprintZoneImageSource::InstallDataset + ); + + // If the zone was previously in-service, it gets + // expunged. 
+ if modified_zone.disposition.before.is_in_service() { + assert!( + modified_zone.disposition.after.is_expunged(), + ); + } + + // If the zone was previously expunged and not ready for + // cleanup, it should be marked ready-for-cleanup + if modified_zone.disposition.before.is_expunged() + && !modified_zone + .disposition + .before + .is_ready_for_cleanup() + { + assert!( + modified_zone + .disposition + .after + .is_ready_for_cleanup(), + ); + } } } } - parent = blueprint; } // Everything's up-to-date in Kansas City! - let blueprint16 = parent; + let blueprint12 = parent; assert_eq!( - blueprint16 + blueprint12 .all_omicron_zones(BlueprintZoneDisposition::is_in_service) .filter(|(_, z)| is_up_to_date_nexus(z)) .count(), - NEXUS_REDUNDANCY + 1, + NEXUS_REDUNDANCY, + ); + assert_eq!( + blueprint12 + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .filter(|(_, z)| is_old_nexus(z)) + .count(), + 0, ); - update_collection_from_blueprint(&mut example, &blueprint16); + update_collection_from_blueprint(&mut example, &blueprint12); assert_planning_makes_no_changes( &logctx.log, - &blueprint16, - &input, + &blueprint12, + &example.input, &example.collection, TEST_NAME, ); @@ -7064,6 +7370,47 @@ pub(crate) mod test { logctx.cleanup_successful(); } + // Updates the PlanningInput to pretend like we're running + // from whichever Nexus has the highest "nexus_generation" value. 
+ fn set_current_nexus_to_highest_generation( + input: &mut PlanningInput, + blueprint: &Blueprint, + ) { + let mut current_gen = + if let Some(current_nexus_id) = input.current_nexus_zone_id() { + blueprint + .sleds + .values() + .find_map(|sled| { + for zone in &sled.zones { + if zone.id == current_nexus_id { + if let BlueprintZoneType::Nexus(nexus_config) = + &zone.zone_type + { + return Some(nexus_config.nexus_generation); + } + } + } + None + }) + .expect("Cannot find current Nexus zone in blueprint") + } else { + Generation::new() + }; + + for sled_config in blueprint.sleds.values() { + for zone in &sled_config.zones { + if let BlueprintZoneType::Nexus(nexus_config) = &zone.zone_type + { + if nexus_config.nexus_generation > current_gen { + input.set_current_nexus_zone_id(zone.id); + current_gen = nexus_config.nexus_generation; + } + } + } + } + } + /// Ensure that planning to update all zones terminates. #[test] fn test_update_all_zones() { @@ -7121,13 +7468,13 @@ pub(crate) mod test { ), }; input_builder.policy_mut().tuf_repo = tuf_repo; - let input = input_builder.build(); + let mut input = input_builder.build(); /// Expected number of planner iterations required to converge. /// If incidental planner work changes this value occasionally, /// that's fine; but if we find we're changing it all the time, /// we should probably drop it and keep just the maximum below. - const EXP_PLANNING_ITERATIONS: usize = 57; + const EXP_PLANNING_ITERATIONS: usize = 55; /// Planning must not take more than this number of iterations. 
const MAX_PLANNING_ITERATIONS: usize = 100; @@ -7148,7 +7495,9 @@ pub(crate) mod test { ) .expect("can't create planner") .plan() - .unwrap_or_else(|_| panic!("can't re-plan after {i} iterations")); + .unwrap_or_else(|err| { + panic!("can't re-plan after {i} iterations: {err}") + }); assert_eq!(blueprint.report.blueprint_id, blueprint.id); eprintln!("{}\n", blueprint.report); @@ -7180,9 +7529,422 @@ pub(crate) mod test { } } + // If there is a newer Nexus, we must jump to it to expunge + // the older Nexus zones. + set_current_nexus_to_highest_generation(&mut input, &blueprint); + parent = blueprint; } panic!("did not converge after {MAX_PLANNING_ITERATIONS} iterations"); } + + struct BlueprintGenerator { + log: Logger, + example: ExampleSystem, + blueprint: Blueprint, + rng: SimRngState, + target_release_generation: Generation, + } + + impl BlueprintGenerator { + fn new( + log: Logger, + example: ExampleSystem, + blueprint: Blueprint, + rng: SimRngState, + ) -> Self { + Self { + log, + example, + blueprint, + rng, + target_release_generation: Generation::new(), + } + } + + fn create_image_at_version( + version: &ArtifactVersion, + ) -> BlueprintZoneImageSource { + let fake_hash = ArtifactHash([0; 32]); + BlueprintZoneImageSource::Artifact { + version: BlueprintArtifactVersion::Available { + version: version.clone(), + }, + hash: fake_hash, + } + } + + // - Bumps the target_release_generation + // - Sets a new "tuf_repo" as part of the "example.input" + // - The system version is hard-coded as "2.0.0" + // - Sets artifacts in the repo to `artifacts` + fn set_new_tuf_repo_with_artifacts( + &mut self, + artifacts: Vec, + ) { + let mut input_builder = self.example.input.clone().into_builder(); + let fake_hash = ArtifactHash([0; 32]); + self.target_release_generation = + self.target_release_generation.next(); + + let tuf_repo = TufRepoPolicy { + target_release_generation: self.target_release_generation, + description: TargetReleaseDescription::TufRepo( + 
TufRepoDescription { + repo: TufRepoMeta { + hash: fake_hash, + targets_role_version: 0, + valid_until: Utc::now(), + system_version: Version::new(2, 0, 0), + file_name: String::from(""), + }, + artifacts, + }, + ), + }; + + input_builder.policy_mut().tuf_repo = tuf_repo; + self.example.input = input_builder.build(); + } + + fn set_old_tuf_repo_to_target(&mut self) { + let mut input_builder = self.example.input.clone().into_builder(); + input_builder.policy_mut().old_repo = + self.example.input.tuf_repo().clone(); + self.example.input = input_builder.build(); + } + + // Plans a new blueprint, validates it, and returns it + // + // Does not set the current blueprint to this new value + #[track_caller] + fn plan_new_blueprint(&mut self, name: &str) -> Blueprint { + let planner = Planner::new_based_on( + self.log.clone(), + &self.blueprint, + &self.example.input, + name, + &self.example.collection, + self.rng.next_planner_rng(), + ) + .expect("can't create planner"); + let bp = planner.plan().expect("planning succeeded"); + verify_blueprint(&bp); + bp + } + + // Asserts that a new blueprint, if generated, will make no changes + #[track_caller] + fn assert_child_bp_makes_no_changes( + &self, + child_blueprint: &Blueprint, + ) { + verify_blueprint(&child_blueprint); + let summary = child_blueprint.diff_since_blueprint(&self.blueprint); + assert_eq!( + summary.diff.sleds.added.len(), + 0, + "{}", + summary.display() + ); + assert_eq!( + summary.diff.sleds.removed.len(), + 0, + "{}", + summary.display() + ); + assert_eq!( + summary.diff.sleds.modified().count(), + 0, + "{}", + summary.display() + ); + } + + // Asserts that a new blueprint, if generated, will have no report. + // + // This function explicitly ignores the "noop_image_source" report. + // + // NOTE: More reports can be added, but we aren't using + // "PlanningReport::is_empty()", because some checks (e.g. + // noop_image_source) are almost always non-empty. 
+ #[track_caller] + fn assert_child_bp_has_no_report(&self, child_blueprint: &Blueprint) { + verify_blueprint(&child_blueprint); + let summary = child_blueprint.diff_since_blueprint(&self.blueprint); + + assert!( + child_blueprint.report.expunge.is_empty() + && child_blueprint.report.decommission.is_empty() + && child_blueprint.report.mgs_updates.is_empty() + && child_blueprint.report.add.is_empty() + && child_blueprint.report.zone_updates.is_empty() + && child_blueprint.report.nexus_generation_bump.is_empty() + && child_blueprint.report.cockroachdb_settings.is_empty(), + "Blueprint Summary: {}\n + Planning report is not empty: {}", + summary.display(), + child_blueprint.report, + ); + } + + // Updates the input inventory to reflect changes from the blueprint + fn update_inventory_from_blueprint(&mut self) { + update_collection_from_blueprint( + &mut self.example, + &self.blueprint, + ); + } + + // Use the "highest generation Nexus". + // + // This effectively changes "which Nexus is trying to perform planning". + fn set_current_nexus_to_highest_generation(&mut self) { + set_current_nexus_to_highest_generation( + &mut self.example.input, + &self.blueprint, + ); + } + } + + #[test] + fn test_nexus_generation_update() { + static TEST_NAME: &str = "test_nexus_generation_update"; + let logctx = test_setup_log(TEST_NAME); + + // Use our example system with multiple Nexus zones + let mut rng = SimRngState::from_seed(TEST_NAME); + let (example, blueprint) = ExampleSystemBuilder::new_with_rng( + &logctx.log, + rng.next_system_rng(), + ) + .nexus_count(3) // Ensure we have multiple Nexus zones + .build(); + verify_blueprint(&blueprint); + + let mut bp_generator = BlueprintGenerator::new( + logctx.log.clone(), + example, + blueprint, + rng, + ); + + // We shouldn't try to bump the generation number without a new TUF + // repo. 
+ let new_bp = bp_generator.plan_new_blueprint("no-op"); + bp_generator.assert_child_bp_makes_no_changes(&new_bp); + bp_generator.assert_child_bp_has_no_report(&new_bp); + + // Initially, all zones should be sourced from the install dataset + assert!( + bp_generator + .blueprint + .all_omicron_zones(BlueprintZoneDisposition::is_in_service) + .all(|(_, z)| matches!( + z.image_source, + BlueprintZoneImageSource::InstallDataset + )) + ); + + // Set up a TUF repo with new artifacts + let artifact_version = + ArtifactVersion::new_static("2.0.0-nexus-gen-test") + .expect("can't parse artifact version"); + bp_generator.set_new_tuf_repo_with_artifacts( + create_artifacts_at_version(&artifact_version), + ); + let image_source = + BlueprintGenerator::create_image_at_version(&artifact_version); + + // Check: Initially, nexus generation update should be blocked because + // non-Nexus zones haven't been updated yet + { + let new_bp = + bp_generator.plan_new_blueprint("test_blocked_by_non_nexus"); + // The blueprint should have a report showing what's blocked + assert!(new_bp.report.nexus_generation_bump.waiting_on.is_some()); + assert!( + matches!( + new_bp.report.nexus_generation_bump.waiting_on, + Some(NexusGenerationBumpWaitingOn::NonNexusZoneUpdate) + ), + "Unexpected Nexus Generation report: {:?}", + new_bp.report.nexus_generation_bump + ); + } + + // Manually update all non-Nexus zones to the new image source + for sled_config in bp_generator.blueprint.sleds.values_mut() { + for mut zone in &mut sled_config.zones { + if zone.zone_type.kind() != ZoneKind::Nexus { + zone.image_source = image_source.clone(); + } + } + } + bp_generator.update_inventory_from_blueprint(); + + // Check: Now nexus generation update should be blocked by lack of new Nexus zones + let old_generation = bp_generator.blueprint.nexus_generation; + let new_bp = + bp_generator.plan_new_blueprint("test_blocked_by_new_nexus"); + { + assert_eq!(new_bp.nexus_generation, old_generation); + + let summary = 
new_bp.diff_since_blueprint(&bp_generator.blueprint); + assert_eq!( + summary.total_zones_added(), + bp_generator.example.input.target_nexus_zone_count() + ); + assert_eq!(summary.total_zones_removed(), 0); + assert_eq!(summary.total_zones_modified(), 0); + + // Should be blocked by new Nexus bringup + assert!( + matches!( + new_bp.report.nexus_generation_bump.waiting_on, + Some(NexusGenerationBumpWaitingOn::ZonePropagation) + ), + "Unexpected Nexus Generation report: {:?}", + new_bp.report.nexus_generation_bump + ); + } + + // Check: If we try generating a new blueprint, we're still stuck behind + // propagation to inventory. + // + // We'll refuse to bump the top-level generation number (which would + // begin quiescing old Nexuses) until we've seen that the new nexus + // zones are up. + bp_generator.blueprint = new_bp; + { + let new_bp = + bp_generator.plan_new_blueprint("wait_for_propagation"); + assert_eq!(new_bp.nexus_generation, old_generation); + + let summary = new_bp.diff_since_blueprint(&bp_generator.blueprint); + assert_eq!(summary.total_zones_added(), 0); + assert_eq!(summary.total_zones_removed(), 0); + assert_eq!(summary.total_zones_modified(), 0); + assert!( + matches!( + new_bp.report.nexus_generation_bump.waiting_on, + Some(NexusGenerationBumpWaitingOn::ZonePropagation) + ), + "Unexpected Nexus Generation report: {:?}", + new_bp.report.nexus_generation_bump + ); + } + + // Make the new Nexus zones appear in inventory + bp_generator.update_inventory_from_blueprint(); + + // Check: Now nexus generation update should succeed + let new_bp = bp_generator.plan_new_blueprint("update_generation"); + // Finally, the top-level Nexus generation should get bumped. 
+ assert_eq!(new_bp.nexus_generation, old_generation.next()); + bp_generator.blueprint = new_bp; + + // Check: After the generation bump, further planning should make no changes + bp_generator.update_inventory_from_blueprint(); + let new_bp = bp_generator.plan_new_blueprint("no-op"); + bp_generator.assert_child_bp_makes_no_changes(&new_bp); + + // However, there will be a report of "three Nexus zones that aren't + // ready to shut down". The blueprint generator still thinks it's + // running from one of these "old Nexuses". + let unsafe_to_shutdown_zones = &new_bp.report.zone_updates.unsafe_zones; + assert_eq!( + unsafe_to_shutdown_zones.len(), + bp_generator.example.input.target_nexus_zone_count() + ); + for why in unsafe_to_shutdown_zones.values() { + use nexus_types::deployment::ZoneUnsafeToShutdown; + match why { + ZoneUnsafeToShutdown::Nexus { + zone_generation, + current_nexus_generation, + } => { + assert_eq!(zone_generation, &Generation::new()); + assert_eq!( + current_nexus_generation, + &Some(Generation::new()) + ); + } + _ => panic!("Unexpected unsafe-to-shutdown zone: {why}"), + } + } + assert_eq!( + unsafe_to_shutdown_zones.len(), + bp_generator.example.input.target_nexus_zone_count() + ); + + // Move ourselves to a "new Nexus". Now observe: we expunge the old + // Nexus zones. + bp_generator.set_current_nexus_to_highest_generation(); + + // Old Nexuses which are in-service + let mut old_nexuses = + bp_generator.example.input.target_nexus_zone_count(); + // Old Nexuses which were expunged, but which still need propagation + let mut expunging_nexuses = 0; + + while old_nexuses > 0 || expunging_nexuses > 0 { + let new_bp = bp_generator.plan_new_blueprint("removal"); + + // We expect to expunge one old nexus at a time, if any exist, and + // also to finalize the expungement of old nexuses that were removed + // in prior iterations. 
+ let expected_modified_nexuses = + expunging_nexuses + if old_nexuses > 0 { 1 } else { 0 }; + + { + let summary = + new_bp.diff_since_blueprint(&bp_generator.blueprint); + assert_eq!( + summary.total_zones_added(), + 0, + "{}", + summary.display() + ); + assert_eq!( + summary.total_zones_removed(), + 0, + "{}", + summary.display() + ); + assert_eq!( + summary.total_zones_modified(), + expected_modified_nexuses, + "{}", + summary.display() + ); + } + if old_nexuses > 0 { + old_nexuses -= 1; + expunging_nexuses = 1; + } else { + expunging_nexuses = 0; + } + + bp_generator.blueprint = new_bp; + bp_generator.update_inventory_from_blueprint(); + } + + let new_bp = bp_generator.plan_new_blueprint("no-op"); + bp_generator.assert_child_bp_makes_no_changes(&new_bp); + bp_generator.assert_child_bp_has_no_report(&new_bp); + + // Check: If the "old TUF repo = new TUF repo", we'll still make no changes + bp_generator.set_old_tuf_repo_to_target(); + let new_bp = bp_generator.plan_new_blueprint("repo-update"); + bp_generator.assert_child_bp_makes_no_changes(&new_bp); + bp_generator.assert_child_bp_has_no_report(&new_bp); + + // After all this, the Nexus generation number has still been updated + // exactly once. 
+ assert_eq!(new_bp.nexus_generation, old_generation.next()); + + logctx.cleanup_successful(); + } } diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs index f09315dd6ac..6412838c52c 100644 --- a/nexus/reconfigurator/planning/src/system.rs +++ b/nexus/reconfigurator/planning/src/system.rs @@ -1020,6 +1020,7 @@ impl SystemDescription { self.internal_dns_version, self.external_dns_version, CockroachDbSettings::empty(), + None, ); builder.set_ignore_impossible_mgs_updates_since( self.ignore_impossible_mgs_updates_since, diff --git a/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt b/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt index 15d2c2d6a77..6125703611f 100644 --- a/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt +++ b/nexus/reconfigurator/planning/tests/output/example_builder_zone_counts_blueprint.txt @@ -531,6 +531,7 @@ parent: e35b2fdd-354d-48d9-acb5-703b2c269a54 internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt index 99e950f246e..98982124114 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_2_3.txt @@ -71,6 +71,7 @@ to: blueprint 4171ad05-89dd-474b-846b-b007e4346366 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt index 
41df1375187..b724ec830ab 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_basic_add_sled_3_5.txt @@ -101,6 +101,7 @@ to: blueprint f432fcd5-1284-4058-8b4a-9286a3de6163 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt index 0e936766516..b619e3f7ddf 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_dataset_settings_modified_in_place_1_2.txt @@ -122,6 +122,7 @@ to: blueprint fe13be30-94c2-4fa6-aad5-ae3c5028f6bb internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt index 488e3f69d00..e79bf2daf74 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_1_2.txt @@ -387,6 +387,7 @@ to: blueprint 1ac2d88f-27dd-4506-8585-6b2be832528e internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt 
index 93d346b3170..b96700bc0ba 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_decommissions_sleds_bp2.txt @@ -322,6 +322,7 @@ parent: 516e80a3-b362-4fac-bd3c-4559717120dd internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -330,7 +331,9 @@ chicken switches: add zones with mupdate override: false * discretionary zones placed: - * 2 zones on sled d67ce8f0-a691-4010-b414-420d82e80527: crucible_pantry, nexus - * 2 zones on sled fefcf4cf-f7e7-46b3-b629-058526ce440e: clickhouse, internal_dns + * crucible_pantry zone on sled d67ce8f0-a691-4010-b414-420d82e80527 from source install dataset + * nexus zone on sled d67ce8f0-a691-4010-b414-420d82e80527 from source install dataset + * clickhouse zone on sled fefcf4cf-f7e7-46b3-b629-058526ce440e from source install dataset + * internal_dns zone on sled fefcf4cf-f7e7-46b3-b629-058526ce440e from source install dataset * zone updates waiting on discretionary zones diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt index 63380e2c1eb..1be47958ab0 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_1_2.txt @@ -320,6 +320,7 @@ to: blueprint 31ef2071-2ec9-49d9-8827-fd83b17a0e3d internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt index b2d2dee5588..af0649e7506 100644 --- 
a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_3_4.txt @@ -9,6 +9,7 @@ to: blueprint 92fa943c-7dd4-48c3-9447-c9d0665744b6 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt index ea64f823b0a..0dc39530072 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_4_5.txt @@ -223,6 +223,7 @@ to: blueprint 2886dab5-61a2-46b4-87af-bc7aeb44cccb internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt index 6b4c5f48e30..e33a4d4a9f0 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_deploy_all_keeper_nodes_5_6.txt @@ -9,6 +9,7 @@ to: blueprint cb39be9d-5476-44fa-9edf-9938376219ef internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt index 9394b253cc6..64f86ecfe65 100644 
--- a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_3_4.txt @@ -408,6 +408,7 @@ to: blueprint 74f2e7fd-687e-4c9e-b5d8-e474a5bb8e7c internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt index 744379716fc..f5a11fe0dbc 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_clusters_5_6.txt @@ -9,6 +9,7 @@ to: blueprint df68d4d4-5af4-4b56-95bb-1654a6957d4f internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt index 5e439554691..fd72c3cda5c 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_expunge_clickhouse_zones_after_policy_is_changed_3_4.txt @@ -338,6 +338,7 @@ to: blueprint d895ef50-9978-454c-bdfb-b8dbe2c9a918 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt 
b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt index 99ccd504aaf..13f6efd866f 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_1_2.txt @@ -373,6 +373,7 @@ to: blueprint 9f71f5d3-a272-4382-9154-6ea2e171a6c6 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt index 8bd822a364c..162e4c1ad69 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_2_2a.txt @@ -349,6 +349,9 @@ mismatched zone type: after: Nexus( }, external_tls: false, external_dns_servers: [], + nexus_generation: Generation( + 1, + ), }, ) @@ -368,6 +371,7 @@ mismatched zone type: after: InternalNtp( internal DNS version::: 1 (unchanged) * external DNS version::: 1 -> 2 target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt index 77c19780bed..3ccbc731707 100644 --- a/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt +++ b/nexus/reconfigurator/planning/tests/output/planner_nonprovisionable_bp2.txt @@ -510,6 +510,7 @@ parent: 4d4e6c38-cd95-4c4e-8f45-6af4d686964b internal DNS version::: 1 external DNS version::: 1 target release min gen: 1 + nexus gen:::::::::::::: 1 PENDING MGS-MANAGED UPDATES: 0 @@ -518,7 +519,11 @@ chicken switches: add zones with mupdate override: false * discretionary 
zones placed: - * 3 zones on sled 75bc286f-2b4b-482c-9431-59272af529da: nexus, nexus, nexus - * 3 zones on sled affab35f-600a-4109-8ea0-34a067a4e0bc: nexus, nexus, nexus + * nexus zone on sled 75bc286f-2b4b-482c-9431-59272af529da from source install dataset + * nexus zone on sled 75bc286f-2b4b-482c-9431-59272af529da from source install dataset + * nexus zone on sled 75bc286f-2b4b-482c-9431-59272af529da from source install dataset + * nexus zone on sled affab35f-600a-4109-8ea0-34a067a4e0bc from source install dataset + * nexus zone on sled affab35f-600a-4109-8ea0-34a067a4e0bc from source install dataset + * nexus zone on sled affab35f-600a-4109-8ea0-34a067a4e0bc from source install dataset * zone updates waiting on discretionary zones diff --git a/nexus/reconfigurator/planning/tests/output/zone_image_source_change_1.txt b/nexus/reconfigurator/planning/tests/output/zone_image_source_change_1.txt index 440e7e28e51..1f229c2ec10 100644 --- a/nexus/reconfigurator/planning/tests/output/zone_image_source_change_1.txt +++ b/nexus/reconfigurator/planning/tests/output/zone_image_source_change_1.txt @@ -122,6 +122,7 @@ to: blueprint 1481141d-a5cf-4103-8344-738967e0f110 internal DNS version::: 1 (unchanged) external DNS version::: 1 (unchanged) target release min gen: 1 (unchanged) + nexus gen:::::::::::::: 1 (unchanged) OXIMETER SETTINGS: generation: 1 (unchanged) diff --git a/nexus/reconfigurator/preparation/src/lib.rs b/nexus/reconfigurator/preparation/src/lib.rs index 20c8cb63690..c6b0b87d377 100644 --- a/nexus/reconfigurator/preparation/src/lib.rs +++ b/nexus/reconfigurator/preparation/src/lib.rs @@ -94,6 +94,7 @@ impl PlanningInputFromDb<'_> { opctx: &OpContext, datastore: &DataStore, chicken_switches: PlannerChickenSwitches, + current_nexus_zone_id: Option, ) -> Result { opctx.check_complex_operations_allowed()?; // Note we list *all* rows here including the ones for decommissioned @@ -226,13 +227,16 @@ impl PlanningInputFromDb<'_> { old_repo, chicken_switches, } - 
.build() + .build(current_nexus_zone_id) .internal_context("assembling planning_input")?; Ok(planning_input) } - pub fn build(&self) -> Result { + pub fn build( + &self, + current_nexus_zone_id: Option, + ) -> Result { let service_ip_pool_ranges = self.ip_pool_range_rows.iter().map(IpRange::from).collect(); let policy = Policy { @@ -257,6 +261,7 @@ impl PlanningInputFromDb<'_> { self.internal_dns_version.into(), self.external_dns_version.into(), self.cockroachdb_settings.clone(), + current_nexus_zone_id, ); let mut zpools_by_sled_id = { @@ -385,6 +390,7 @@ async fn fetch_all_service_ip_pool_ranges( pub async fn reconfigurator_state_load( opctx: &OpContext, datastore: &DataStore, + current_nexus_zone_id: Option, ) -> Result { opctx.check_complex_operations_allowed()?; let chicken_switches = datastore @@ -393,9 +399,13 @@ pub async fn reconfigurator_state_load( .map_or_else(PlannerChickenSwitches::default, |switches| { switches.switches.planner_switches }); - let planning_input = - PlanningInputFromDb::assemble(opctx, datastore, chicken_switches) - .await?; + let planning_input = PlanningInputFromDb::assemble( + opctx, + datastore, + chicken_switches, + current_nexus_zone_id, + ) + .await?; let collection_ids = datastore .inventory_collections() .await diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index 51b6d1d8658..78f1ef56617 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -499,6 +499,7 @@ impl BackgroundTasksInitializer { // target blueprint. 
let blueprint_planner = blueprint_planner::BlueprintPlanner::new( datastore.clone(), + args.nexus_id, chicken_switches_watcher.clone(), inventory_watcher.clone(), rx_blueprint.clone(), diff --git a/nexus/src/app/background/tasks/blueprint_execution.rs b/nexus/src/app/background/tasks/blueprint_execution.rs index 2ac61471e7c..27315202719 100644 --- a/nexus/src/app/background/tasks/blueprint_execution.rs +++ b/nexus/src/app/background/tasks/blueprint_execution.rs @@ -278,6 +278,7 @@ mod test { internal_dns_version: dns_version, external_dns_version: dns_version, target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: Generation::new(), diff --git a/nexus/src/app/background/tasks/blueprint_load.rs b/nexus/src/app/background/tasks/blueprint_load.rs index d2d9c7c380e..7b7f546388d 100644 --- a/nexus/src/app/background/tasks/blueprint_load.rs +++ b/nexus/src/app/background/tasks/blueprint_load.rs @@ -225,6 +225,7 @@ mod test { internal_dns_version: Generation::new(), external_dns_version: Generation::new(), target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), cockroachdb_fingerprint: String::new(), clickhouse_cluster_config: None, oximeter_read_version: Generation::new(), diff --git a/nexus/src/app/background/tasks/blueprint_planner.rs b/nexus/src/app/background/tasks/blueprint_planner.rs index d6519fdad3b..9ae27a227fe 100644 --- a/nexus/src/app/background/tasks/blueprint_planner.rs +++ b/nexus/src/app/background/tasks/blueprint_planner.rs @@ -19,6 +19,7 @@ use nexus_types::internal_api::background::BlueprintPlannerStatus; use omicron_common::api::external::LookupType; use omicron_uuid_kinds::CollectionUuid; use omicron_uuid_kinds::GenericUuid as _; +use omicron_uuid_kinds::OmicronZoneUuid; use serde_json::json; use std::sync::Arc; use tokio::sync::watch::{self, Receiver, Sender}; @@ -26,6 +27,7 @@ use 
tokio::sync::watch::{self, Receiver, Sender}; /// Background task that runs the update planner. pub struct BlueprintPlanner { datastore: Arc, + nexus_id: OmicronZoneUuid, rx_chicken_switches: Receiver, rx_inventory: Receiver>, rx_blueprint: Receiver>>, @@ -35,6 +37,7 @@ pub struct BlueprintPlanner { impl BlueprintPlanner { pub fn new( datastore: Arc, + nexus_id: OmicronZoneUuid, rx_chicken_switches: Receiver, rx_inventory: Receiver>, rx_blueprint: Receiver>>, @@ -42,6 +45,7 @@ impl BlueprintPlanner { let (tx_blueprint, _) = watch::channel(None); Self { datastore, + nexus_id, rx_chicken_switches, rx_inventory, rx_blueprint, @@ -118,6 +122,7 @@ impl BlueprintPlanner { opctx, &self.datastore, switches.switches.planner_switches, + Some(self.nexus_id), ) .await { @@ -341,6 +346,7 @@ mod test { // Finally, spin up the planner background task. let mut planner = BlueprintPlanner::new( datastore.clone(), + nexus.id, chicken_switches_collector_rx, rx_collector, rx_loader.clone(), diff --git a/nexus/src/app/deployment.rs b/nexus/src/app/deployment.rs index 43f6f558a4b..0956e07ba60 100644 --- a/nexus/src/app/deployment.rs +++ b/nexus/src/app/deployment.rs @@ -139,9 +139,13 @@ impl super::Nexus { switches.switches.planner_switches }); - let planning_input = - PlanningInputFromDb::assemble(opctx, datastore, chicken_switches) - .await?; + let planning_input = PlanningInputFromDb::assemble( + opctx, + datastore, + chicken_switches, + Some(self.id), + ) + .await?; // The choice of which inventory collection to use here is not // necessarily trivial. 
Inventory collections may be incomplete due to diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 5022f142924..f84367946dc 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -887,6 +887,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { vni: Vni::SERVICES_VNI, transit_ips: vec![], }, + nexus_generation: Generation::new(), }), image_source: BlueprintZoneImageSource::InstallDataset, }); @@ -972,6 +973,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { internal_dns_version: dns_config.generation, external_dns_version: Generation::new(), target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), cockroachdb_fingerprint: String::new(), cockroachdb_setting_preserve_downgrade: CockroachDbPreserveDowngrade::DoNotModify, diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index 5928daf9363..7a2cca518f9 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -124,12 +124,14 @@ pub use planning_input::TufRepoContentsError; pub use planning_input::TufRepoPolicy; pub use planning_input::ZpoolFilter; pub use planning_report::CockroachdbUnsafeToShutdown; +pub use planning_report::NexusGenerationBumpWaitingOn; pub use planning_report::PlanningAddStepReport; pub use planning_report::PlanningCockroachdbSettingsStepReport; pub use planning_report::PlanningDecommissionStepReport; pub use planning_report::PlanningExpungeStepReport; pub use planning_report::PlanningMgsUpdatesStepReport; pub use planning_report::PlanningMupdateOverrideStepReport; +pub use planning_report::PlanningNexusGenerationBumpReport; pub use planning_report::PlanningNoopImageSourceSkipSledHostPhase2Reason; pub use planning_report::PlanningNoopImageSourceSkipSledZonesReason; pub use planning_report::PlanningNoopImageSourceSkipZoneReason; @@ -227,6 +229,12 @@ pub struct Blueprint { /// driving the system to the target release. 
pub target_release_minimum_generation: Generation, + /// The generation of the active group of Nexuses + /// + /// If a Nexus instance notices it has a nexus_generation less than + /// this value, it will start to quiesce (see: RFD 588). + pub nexus_generation: Generation, + /// CockroachDB state fingerprint when this blueprint was created // See `nexus/db-queries/src/db/datastore/cockroachdb_settings.rs` for more // on this. @@ -275,6 +283,7 @@ impl Blueprint { external_dns_version: self.external_dns_version, target_release_minimum_generation: self .target_release_minimum_generation, + nexus_generation: self.nexus_generation, cockroachdb_fingerprint: self.cockroachdb_fingerprint.clone(), cockroachdb_setting_preserve_downgrade: Some( self.cockroachdb_setting_preserve_downgrade, @@ -609,6 +618,7 @@ impl BlueprintDisplay<'_> { .target_release_minimum_generation .to_string(), ), + (NEXUS_GENERATION, self.blueprint.nexus_generation.to_string()), ], ) } @@ -651,6 +661,7 @@ impl fmt::Display for BlueprintDisplay<'_> { // These six fields are handled by `make_metadata_table()`, called // below. target_release_minimum_generation: _, + nexus_generation: _, internal_dns_version: _, external_dns_version: _, time_created: _, @@ -2073,6 +2084,10 @@ pub struct BlueprintMetadata { /// /// See [`Blueprint::target_release_minimum_generation`]. pub target_release_minimum_generation: Generation, + /// The Nexus generation number + /// + /// See [`Blueprint::nexus_generation`]. 
+ pub nexus_generation: Generation, /// CockroachDB state fingerprint when this blueprint was created pub cockroachdb_fingerprint: String, /// Whether to set `cluster.preserve_downgrade_option` and what to set it to diff --git a/nexus/types/src/deployment/blueprint_diff.rs b/nexus/types/src/deployment/blueprint_diff.rs index a29cb57317f..6a1646ddd48 100644 --- a/nexus/types/src/deployment/blueprint_diff.rs +++ b/nexus/types/src/deployment/blueprint_diff.rs @@ -64,6 +64,7 @@ impl<'a> BlueprintDiffSummary<'a> { pending_mgs_updates, clickhouse_cluster_config, target_release_minimum_generation, + nexus_generation, // Metadata fields for which changes don't reflect semantic // changes from one blueprint to the next. id: _, @@ -112,6 +113,11 @@ impl<'a> BlueprintDiffSummary<'a> { return true; } + // Did the nexus generation change? + if nexus_generation.before != nexus_generation.after { + return true; + } + // All fields checked or ignored; if we get here, there are no // meaningful changes. false @@ -1834,6 +1840,7 @@ impl<'diff, 'b> BlueprintDiffDisplay<'diff, 'b> { target_release_minimum_generation, TARGET_RELEASE_MIN_GEN ), + diff_row!(nexus_generation, NEXUS_GENERATION), ], ), ] diff --git a/nexus/types/src/deployment/blueprint_display.rs b/nexus/types/src/deployment/blueprint_display.rs index e0dc0080f95..dec9ce3e699 100644 --- a/nexus/types/src/deployment/blueprint_display.rs +++ b/nexus/types/src/deployment/blueprint_display.rs @@ -44,6 +44,7 @@ pub mod constants { pub const EXTERNAL_DNS_VERSION: &str = "external DNS version"; // Keep this a bit short to not make the key column too wide. 
pub const TARGET_RELEASE_MIN_GEN: &str = "target release min gen"; + pub const NEXUS_GENERATION: &str = "nexus gen"; pub const COMMENT: &str = "comment"; pub const UNCHANGED_PARENS: &str = "(unchanged)"; diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index e5c3f499360..6d4ee2213ff 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -123,6 +123,12 @@ pub struct PlanningInput { /// mark under the assumption that they may appear to be impossible because /// they're currently in progress. ignore_impossible_mgs_updates_since: DateTime, + + /// ID of the currently running Nexus zone + /// + /// This is used to identify which Nexus is currently executing the planning + /// operation, which is needed for safe shutdown decisions during handoff. + current_nexus_zone_id: Option, } impl PlanningInput { @@ -240,6 +246,15 @@ impl PlanningInput { self.policy.oximeter_read_policy.mode.single_node_enabled() } + /// ID of the currently running Nexus zone + pub fn current_nexus_zone_id(&self) -> Option { + self.current_nexus_zone_id + } + + pub fn set_current_nexus_zone_id(&mut self, id: OmicronZoneUuid) { + self.current_nexus_zone_id = Some(id); + } + pub fn all_sleds( &self, filter: SledFilter, @@ -318,6 +333,7 @@ impl PlanningInput { network_resources: self.network_resources, ignore_impossible_mgs_updates_since: self .ignore_impossible_mgs_updates_since, + current_nexus_zone_id: self.current_nexus_zone_id, } } } @@ -1260,6 +1276,7 @@ pub struct PlanningInputBuilder { sleds: BTreeMap, network_resources: OmicronZoneNetworkResources, ignore_impossible_mgs_updates_since: DateTime, + current_nexus_zone_id: Option, } impl PlanningInputBuilder { @@ -1288,6 +1305,7 @@ impl PlanningInputBuilder { sleds: BTreeMap::new(), network_resources: OmicronZoneNetworkResources::new(), ignore_impossible_mgs_updates_since: Utc::now(), + current_nexus_zone_id: None, } } @@ -1296,6 
+1314,7 @@ impl PlanningInputBuilder { internal_dns_version: Generation, external_dns_version: Generation, cockroachdb_settings: CockroachDbSettings, + current_nexus_zone_id: Option, ) -> Self { Self { policy, @@ -1306,6 +1325,7 @@ impl PlanningInputBuilder { network_resources: OmicronZoneNetworkResources::new(), ignore_impossible_mgs_updates_since: Utc::now() - MGS_UPDATE_SETTLE_TIMEOUT, + current_nexus_zone_id, } } @@ -1401,6 +1421,13 @@ impl PlanningInputBuilder { self.cockroachdb_settings = cockroachdb_settings; } + pub fn set_current_nexus_zone_id( + &mut self, + current_nexus_zone_id: Option, + ) { + self.current_nexus_zone_id = current_nexus_zone_id; + } + pub fn build(self) -> PlanningInput { PlanningInput { policy: self.policy, @@ -1411,6 +1438,7 @@ impl PlanningInputBuilder { network_resources: self.network_resources, ignore_impossible_mgs_updates_since: self .ignore_impossible_mgs_updates_since, + current_nexus_zone_id: self.current_nexus_zone_id, } } } diff --git a/nexus/types/src/deployment/planning_report.rs b/nexus/types/src/deployment/planning_report.rs index 04d1fde21ee..4bffa33521f 100644 --- a/nexus/types/src/deployment/planning_report.rs +++ b/nexus/types/src/deployment/planning_report.rs @@ -12,6 +12,7 @@ use super::PendingMgsUpdates; use super::PlannerChickenSwitches; use daft::Diffable; +use omicron_common::api::external::Generation; use omicron_common::policy::COCKROACHDB_REDUNDANCY; use omicron_uuid_kinds::BlueprintUuid; use omicron_uuid_kinds::MupdateOverrideUuid; @@ -61,6 +62,7 @@ pub struct PlanningReport { pub mgs_updates: PlanningMgsUpdatesStepReport, pub add: PlanningAddStepReport, pub zone_updates: PlanningZoneUpdatesStepReport, + pub nexus_generation_bump: PlanningNexusGenerationBumpReport, pub cockroachdb_settings: PlanningCockroachdbSettingsStepReport, } @@ -77,6 +79,7 @@ impl PlanningReport { ), add: PlanningAddStepReport::new(), zone_updates: PlanningZoneUpdatesStepReport::new(), + nexus_generation_bump: 
PlanningNexusGenerationBumpReport::new(), cockroachdb_settings: PlanningCockroachdbSettingsStepReport::new(), } } @@ -88,6 +91,7 @@ impl PlanningReport { && self.mgs_updates.is_empty() && self.add.is_empty() && self.zone_updates.is_empty() + && self.nexus_generation_bump.is_empty() && self.cockroachdb_settings.is_empty() } } @@ -110,6 +114,7 @@ impl fmt::Display for PlanningReport { mgs_updates, add, zone_updates, + nexus_generation_bump, cockroachdb_settings, } = self; writeln!(f, "planning report for blueprint {blueprint_id}:")?; @@ -126,6 +131,7 @@ impl fmt::Display for PlanningReport { mgs_updates.fmt(f)?; add.fmt(f)?; zone_updates.fmt(f)?; + nexus_generation_bump.fmt(f)?; cockroachdb_settings.fmt(f)?; } Ok(()) @@ -526,6 +532,14 @@ pub struct PlanningAddSufficientZonesExist { pub num_existing: usize, } +#[derive( + Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Diffable, JsonSchema, +)] +pub struct DiscretionaryZonePlacement { + kind: String, + source: String, +} + #[derive( Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Diffable, JsonSchema, )] @@ -573,7 +587,8 @@ pub struct PlanningAddStepReport { /// Sled ID → kinds of discretionary zones placed there // TODO: make `sled_add_zone_*` methods return the added zone config // so that we can report it here. 
- pub discretionary_zones_placed: BTreeMap>, + pub discretionary_zones_placed: + BTreeMap>, } impl PlanningAddStepReport { @@ -655,11 +670,22 @@ impl PlanningAddStepReport { &mut self, sled_id: SledUuid, zone_kind: &str, + image_source: &BlueprintZoneImageSource, ) { self.discretionary_zones_placed .entry(sled_id) - .and_modify(|kinds| kinds.push(zone_kind.to_owned())) - .or_insert_with(|| vec![zone_kind.to_owned()]); + .and_modify(|kinds| { + kinds.push(DiscretionaryZonePlacement { + kind: zone_kind.to_owned(), + source: image_source.to_string(), + }) + }) + .or_insert_with(|| { + vec![DiscretionaryZonePlacement { + kind: zone_kind.to_owned(), + source: image_source.to_string(), + }] + }); } } @@ -770,13 +796,13 @@ impl fmt::Display for PlanningAddStepReport { if !discretionary_zones_placed.is_empty() { writeln!(f, "* discretionary zones placed:")?; - for (sled_id, kinds) in discretionary_zones_placed.iter() { - let (n, s) = plural_vec(kinds); - writeln!( - f, - " * {n} zone{s} on sled {sled_id}: {}", - kinds.join(", ") - )?; + for (sled_id, placements) in discretionary_zones_placed.iter() { + for DiscretionaryZonePlacement { kind, source } in placements { + writeln!( + f, + " * {kind} zone on sled {sled_id} from source {source}", + )?; + } } } @@ -979,9 +1005,21 @@ impl ZoneUpdatesWaitingOn { )] #[serde(rename_all = "snake_case", tag = "type")] pub enum ZoneUnsafeToShutdown { - Cockroachdb { reason: CockroachdbUnsafeToShutdown }, - BoundaryNtp { total_boundary_ntp_zones: usize, synchronized_count: usize }, - InternalDns { total_internal_dns_zones: usize, synchronized_count: usize }, + Cockroachdb { + reason: CockroachdbUnsafeToShutdown, + }, + BoundaryNtp { + total_boundary_ntp_zones: usize, + synchronized_count: usize, + }, + InternalDns { + total_internal_dns_zones: usize, + synchronized_count: usize, + }, + Nexus { + zone_generation: Generation, + current_nexus_generation: Option, + }, } impl fmt::Display for ZoneUnsafeToShutdown { @@ -996,6 +1034,96 @@ impl 
fmt::Display for ZoneUnsafeToShutdown { total_internal_dns_zones: t, synchronized_count: s, } => write!(f, "only {s}/{t} internal DNS zones are synchronized"), + Self::Nexus { zone_generation, current_nexus_generation } => { + match current_nexus_generation { + Some(current) => write!( + f, + "zone gen ({zone_generation}) >= currently-running \ + Nexus gen ({current})" + ), + None => write!( + f, + "zone gen is {zone_generation}, but currently-running \ + Nexus generation is unknown" + ), + } + } + } + } +} + +#[derive( + Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Diffable, JsonSchema, +)] +pub struct PlanningNexusGenerationBumpReport { + /// What are we waiting on to increase the generation number? + pub waiting_on: Option, + + pub next_generation: Option, +} + +impl PlanningNexusGenerationBumpReport { + pub fn new() -> Self { + Self { waiting_on: None, next_generation: None } + } + + pub fn is_empty(&self) -> bool { + self.waiting_on.is_none() && self.next_generation.is_none() + } + + pub fn set_waiting_on(&mut self, why: NexusGenerationBumpWaitingOn) { + self.waiting_on = Some(why); + } + + pub fn set_next_generation(&mut self, next_generation: Generation) { + self.next_generation = Some(next_generation); + } +} + +impl fmt::Display for PlanningNexusGenerationBumpReport { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let PlanningNexusGenerationBumpReport { waiting_on, next_generation } = + self; + + match (waiting_on, next_generation) { + (Some(why), _) => { + writeln!( + f, + "* waiting to update top-level nexus_generation: {}", + why.as_str() + )?; + } + (None, Some(gen)) => { + writeln!(f, "* updating top-level nexus_generation to: {gen}")?; + } + // Nothing to report + (None, None) => (), + } + Ok(()) + } +} + +#[derive( + Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Diffable, JsonSchema, +)] +#[serde(rename_all = "snake_case", tag = "type")] +pub enum NexusGenerationBumpWaitingOn { + /// Waiting for non-Nexus zones to finish 
updating + NonNexusZoneUpdate, + + /// Waiting for enough new Nexus zones to appear + NewNexusBringup, + + /// Waiting for zones to propagate to inventory + ZonePropagation, +} + +impl NexusGenerationBumpWaitingOn { + pub fn as_str(&self) -> &'static str { + match self { + Self::NonNexusZoneUpdate => "pending non-nexus zone updates", + Self::NewNexusBringup => "waiting for new nexus zones", + Self::ZonePropagation => "pending zone reconciliation", } } } diff --git a/nexus/types/src/deployment/zone_type.rs b/nexus/types/src/deployment/zone_type.rs index 31e26c3a994..79cb68fb98a 100644 --- a/nexus/types/src/deployment/zone_type.rs +++ b/nexus/types/src/deployment/zone_type.rs @@ -343,6 +343,7 @@ pub mod blueprint_zone_type { use crate::deployment::OmicronZoneExternalSnatIp; use daft::Diffable; use nexus_sled_agent_shared::inventory::OmicronZoneDataset; + use omicron_common::api::external::Generation; use omicron_common::api::internal::shared::NetworkInterface; use schemars::JsonSchema; use serde::Deserialize; @@ -566,6 +567,10 @@ pub mod blueprint_zone_type { pub external_tls: bool, /// External DNS servers Nexus can use to resolve external hosts. pub external_dns_servers: Vec, + /// Generation number for this Nexus zone. + /// This is used to coordinate handoff between old and new Nexus instances + /// during updates. See RFD 588. 
+ pub nexus_generation: Generation, } #[derive( diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json index 82331b85632..c083fe29e4e 100644 --- a/openapi/nexus-internal.json +++ b/openapi/nexus-internal.json @@ -2568,6 +2568,14 @@ } ] }, + "nexus_generation": { + "description": "The generation of the active group of Nexuses\n\nIf a Nexus instance notices it has a nexus_generation less than this value, it will start to quiesce (see: RFD 588).", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, "oximeter_read_mode": { "description": "Whether oximeter should read from a single node or a cluster", "allOf": [ @@ -2638,6 +2646,7 @@ "external_dns_version", "id", "internal_dns_version", + "nexus_generation", "oximeter_read_mode", "oximeter_read_version", "pending_mgs_updates", @@ -2862,6 +2871,14 @@ } ] }, + "nexus_generation": { + "description": "The Nexus generation number\n\nSee [`Blueprint::nexus_generation`].", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, "parent_blueprint_id": { "nullable": true, "description": "which blueprint this blueprint is based on", @@ -2892,6 +2909,7 @@ "external_dns_version", "id", "internal_dns_version", + "nexus_generation", "target_release_minimum_generation", "time_created" ] @@ -3517,6 +3535,14 @@ "description": "The address at which the internal nexus server is reachable.", "type": "string" }, + "nexus_generation": { + "description": "Generation number for this Nexus zone. This is used to coordinate handoff between old and new Nexus instances during updates. 
See RFD 588.", + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, "nic": { "description": "The service vNIC providing external connectivity using OPTE.", "allOf": [ @@ -3537,6 +3563,7 @@ "external_ip", "external_tls", "internal_address", + "nexus_generation", "nic", "type" ] @@ -4192,6 +4219,21 @@ "saga_id" ] }, + "DiscretionaryZonePlacement": { + "type": "object", + "properties": { + "kind": { + "type": "string" + }, + "source": { + "type": "string" + } + }, + "required": [ + "kind", + "source" + ] + }, "DiskIdentity": { "description": "Uniquely identifies a disk.", "type": "object", @@ -5686,6 +5728,55 @@ "description": "Password hashes must be in PHC (Password Hashing Competition) string format. Passwords must be hashed with Argon2id. Password hashes may be rejected if the parameters appear not to be secure enough.", "type": "string" }, + "NexusGenerationBumpWaitingOn": { + "oneOf": [ + { + "description": "Waiting for non-Nexus zones to finish updating", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "non_nexus_zone_update" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Waiting for enough new Nexus zones to appear", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "new_nexus_bringup" + ] + } + }, + "required": [ + "type" + ] + }, + { + "description": "Waiting for zones to propagate to inventory", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "zone_propagation" + ] + } + }, + "required": [ + "type" + ] + } + ] + }, "NodeName": { "description": "Unique name for a saga [`Node`]\n\nEach node requires a string name that's unique within its DAG. The name is used to identify its output. 
Nodes that depend on a given node (either directly or indirectly) can access the node's output using its name.", "type": "string" @@ -6583,7 +6674,7 @@ "additionalProperties": { "type": "array", "items": { - "type": "string" + "$ref": "#/components/schemas/DiscretionaryZonePlacement" } } }, @@ -6745,6 +6836,28 @@ "pending_mgs_updates" ] }, + "PlanningNexusGenerationBumpReport": { + "type": "object", + "properties": { + "next_generation": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "waiting_on": { + "nullable": true, + "description": "What are we waiting on to increase the generation number?", + "allOf": [ + { + "$ref": "#/components/schemas/NexusGenerationBumpWaitingOn" + } + ] + } + } + }, "PlanningNoopImageSourceConverted": { "description": "How many of the total install-dataset zones and/or host phase 2 slots were noop-converted to use the artifact store on a particular sled.", "type": "object", @@ -7048,6 +7161,9 @@ "mgs_updates": { "$ref": "#/components/schemas/PlanningMgsUpdatesStepReport" }, + "nexus_generation_bump": { + "$ref": "#/components/schemas/PlanningNexusGenerationBumpReport" + }, "noop_image_source": { "$ref": "#/components/schemas/PlanningNoopImageSourceStepReport" }, @@ -7063,6 +7179,7 @@ "decommission", "expunge", "mgs_updates", + "nexus_generation_bump", "noop_image_source", "zone_updates" ] @@ -9228,6 +9345,32 @@ "total_internal_dns_zones", "type" ] + }, + { + "type": "object", + "properties": { + "current_nexus_generation": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Generation" + } + ] + }, + "type": { + "type": "string", + "enum": [ + "nexus" + ] + }, + "zone_generation": { + "$ref": "#/components/schemas/Generation" + } + }, + "required": [ + "type", + "zone_generation" + ] } ] }, diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 4ef07d9dbdf..34547d92ba1 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -4522,7 +4522,10 @@ 
CREATE TABLE IF NOT EXISTS omicron.public.blueprint ( -- driving the system to the target release. -- -- This is set to 1 by default in application code. - target_release_minimum_generation INT8 NOT NULL + target_release_minimum_generation INT8 NOT NULL, + + -- The generation of the active group of Nexus instances + nexus_generation INT8 NOT NULL ); -- table describing both the current and historical target blueprints of the @@ -4732,6 +4735,9 @@ CREATE TABLE IF NOT EXISTS omicron.public.bp_omicron_zone ( image_source omicron.public.bp_zone_image_source NOT NULL, image_artifact_sha256 STRING(64), + -- Generation for Nexus zones + nexus_generation INT8, + PRIMARY KEY (blueprint_id, id), CONSTRAINT expunged_disposition_properties CHECK ( @@ -6562,7 +6568,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '183.0.0', NULL) + (TRUE, NOW(), NOW(), '184.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/nexus-generation/up01.sql b/schema/crdb/nexus-generation/up01.sql new file mode 100644 index 00000000000..42d87c2f6f7 --- /dev/null +++ b/schema/crdb/nexus-generation/up01.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.bp_omicron_zone ADD COLUMN IF NOT EXISTS nexus_generation INT8; diff --git a/schema/crdb/nexus-generation/up02.sql b/schema/crdb/nexus-generation/up02.sql new file mode 100644 index 00000000000..53429df8ebe --- /dev/null +++ b/schema/crdb/nexus-generation/up02.sql @@ -0,0 +1,5 @@ +SET LOCAL disallow_full_table_scans = off; + +UPDATE omicron.public.bp_omicron_zone +SET nexus_generation = 1 +WHERE zone_type = 'nexus'; diff --git a/schema/crdb/nexus-generation/up03.sql b/schema/crdb/nexus-generation/up03.sql new file mode 100644 index 00000000000..d7623a84c80 --- /dev/null +++ b/schema/crdb/nexus-generation/up03.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.blueprint ADD COLUMN IF NOT EXISTS nexus_generation INT8 NOT NULL DEFAULT 1; diff --git a/schema/crdb/nexus-generation/up04.sql 
b/schema/crdb/nexus-generation/up04.sql new file mode 100644 index 00000000000..072231d9b01 --- /dev/null +++ b/schema/crdb/nexus-generation/up04.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.blueprint ALTER COLUMN nexus_generation DROP DEFAULT; diff --git a/sled-agent/src/rack_setup/plan/service.rs b/sled-agent/src/rack_setup/plan/service.rs index f7941e9724c..37ea4757d9f 100644 --- a/sled-agent/src/rack_setup/plan/service.rs +++ b/sled-agent/src/rack_setup/plan/service.rs @@ -26,7 +26,7 @@ use omicron_common::address::{ RSS_RESERVED_ADDRESSES, ReservedRackSubnet, SLED_PREFIX, get_sled_address, get_switch_zone_address, }; -use omicron_common::api::external::{MacAddr, Vni}; +use omicron_common::api::external::{Generation, MacAddr, Vni}; use omicron_common::api::internal::shared::{ NetworkInterface, NetworkInterfaceKind, SourceNatConfig, SourceNatConfigError, @@ -570,6 +570,7 @@ impl Plan { // development that it might not be. external_tls: !config.external_certificates.is_empty(), external_dns_servers: config.dns_servers.clone(), + nexus_generation: Generation::new(), }, ), filesystem_pool, diff --git a/sled-agent/src/rack_setup/service.rs b/sled-agent/src/rack_setup/service.rs index e2eaac58500..f18e205b6f7 100644 --- a/sled-agent/src/rack_setup/service.rs +++ b/sled-agent/src/rack_setup/service.rs @@ -1631,6 +1631,7 @@ pub(crate) fn build_initial_blueprint_from_sled_configs( // (including creating the recovery silo). external_dns_version: Generation::new(), target_release_minimum_generation: Generation::new(), + nexus_generation: Generation::new(), // Nexus will fill in the CockroachDB values during initialization. 
cockroachdb_fingerprint: String::new(), cockroachdb_setting_preserve_downgrade: diff --git a/sled-agent/src/sim/server.rs b/sled-agent/src/sim/server.rs index 12561713a75..b41bddf85ca 100644 --- a/sled-agent/src/sim/server.rs +++ b/sled-agent/src/sim/server.rs @@ -453,6 +453,7 @@ pub async fn run_standalone_server( }, external_tls: false, external_dns_servers: vec![], + nexus_generation: Generation::new(), }), filesystem_pool: get_random_zpool(), image_source: BlueprintZoneImageSource::InstallDataset,