From 8de5867f4f7c1bb890eea2da4dc9e3a3dc0769b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radek=20Ma=C5=88=C3=A1k?= Date: Thu, 30 Apr 2026 15:46:58 +0200 Subject: [PATCH 1/2] Vendor API --- go.mod | 4 +- go.sum | 8 +- .../github.com/openshift/api/.golangci.yaml | 6 + .../openshift/api/config/v1/types.go | 5 + .../api/config/v1/types_apiserver.go | 4 +- .../api/config/v1/types_authentication.go | 15 +- .../openshift/api/config/v1/types_dns.go | 9 +- .../api/config/v1/types_infrastructure.go | 9 +- .../api/config/v1/types_kmsencryption.go | 264 ++++++- ...tor_01_apiservers-CustomNoUpgrade.crd.yaml | 285 ++++++- ...01_apiservers-DevPreviewNoUpgrade.crd.yaml | 285 ++++++- ...1_apiservers-TechPreviewNoUpgrade.crd.yaml | 271 +++++++ ...1_authentications-CustomNoUpgrade.crd.yaml | 17 +- ...erator_01_authentications-Default.crd.yaml | 7 +- ...thentications-DevPreviewNoUpgrade.crd.yaml | 17 +- ...g-operator_01_authentications-OKD.crd.yaml | 7 +- ...hentications-TechPreviewNoUpgrade.crd.yaml | 17 +- ...operator_01_dnses-CustomNoUpgrade.crd.yaml | 198 +++++ ..._config-operator_01_dnses-Default.crd.yaml | 198 +++++ ...ator_01_dnses-DevPreviewNoUpgrade.crd.yaml | 198 +++++ ..._10_config-operator_01_dnses-OKD.crd.yaml} | 11 +- ...tor_01_dnses-TechPreviewNoUpgrade.crd.yaml | 198 +++++ ...1_infrastructures-CustomNoUpgrade.crd.yaml | 2 + ...erator_01_infrastructures-Default.crd.yaml | 117 +++ ...frastructures-DevPreviewNoUpgrade.crd.yaml | 2 + ...g-operator_01_infrastructures-OKD.crd.yaml | 117 +++ ...rastructures-TechPreviewNoUpgrade.crd.yaml | 2 + .../api/config/v1/zz_generated.deepcopy.go | 126 ++- ..._generated.featuregated-crd-manifests.yaml | 7 +- .../v1/zz_generated.swagger_doc_generated.go | 86 +- .../v1alpha1/types_cluster_monitoring.go | 719 ++++++++++++++++- .../config/v1alpha1/zz_generated.deepcopy.go | 408 ++++++++++ .../zz_generated.swagger_doc_generated.go | 196 ++++- .../api/console/v1/types_console_plugin.go | 1 - ..._generated.featuregated-crd-manifests.yaml | 3 +- 
.../openshift/api/envtest-releases.yaml | 26 + .../github.com/openshift/api/etcd/install.go | 3 +- .../github.com/openshift/api/etcd/v1/Makefile | 3 + .../github.com/openshift/api/etcd/v1/doc.go | 6 + .../openshift/api/etcd/v1/register.go | 39 + .../api/etcd/v1/types_pacemakercluster.go | 737 ++++++++++++++++++ .../api/etcd/v1/zz_generated.deepcopy.go | 210 +++++ ..._generated.featuregated-crd-manifests.yaml | 23 + .../v1/zz_generated.swagger_doc_generated.go | 89 +++ .../etcd/v1alpha1/types_pacemakercluster.go | 5 +- .../zz_generated.swagger_doc_generated.go | 2 +- vendor/github.com/openshift/api/features.md | 22 +- .../openshift/api/features/features.go | 94 +-- .../api/machine/v1beta1/types_machineset.go | 8 + ...pi_01_machinesets-CustomNoUpgrade.crd.yaml | 9 + ...achine-api_01_machinesets-Default.crd.yaml | 9 + ...1_machinesets-DevPreviewNoUpgrade.crd.yaml | 9 + ...10_machine-api_01_machinesets-OKD.crd.yaml | 9 + ..._machinesets-TechPreviewNoUpgrade.crd.yaml | 9 + .../zz_generated.swagger_doc_generated.go | 1 + .../operator/v1/types_csi_cluster_driver.go | 10 +- .../api/operator/v1/types_ingress.go | 43 + .../api/operator/v1/types_network.go | 115 +++ .../api/operator/v1/zz_generated.deepcopy.go | 34 + ..._generated.featuregated-crd-manifests.yaml | 7 +- .../v1/zz_generated.swagger_doc_generated.go | 26 +- .../api/operator/v1alpha1/types_clusterapi.go | 62 +- .../v1alpha1/zz_generated.deepcopy.go | 47 +- .../zz_generated.swagger_doc_generated.go | 35 +- .../openshift/api/quota/v1/generated.proto | 2 +- .../openshift/api/quota/v1/types.go | 2 +- ..._generated.featuregated-crd-manifests.yaml | 4 +- .../openshift/api/security/v1/generated.proto | 5 +- .../openshift/api/security/v1/types.go | 5 +- ..._generated.featuregated-crd-manifests.yaml | 3 +- .../v1/zz_generated.swagger_doc_generated.go | 4 +- .../config/v1/awsdnsspec.go | 5 + .../config/v1/awskmsconfig.go | 42 - .../config/v1/infrastructurestatus.go | 2 + .../config/v1/kmsconfig.go | 20 +- 
.../config/v1/prefixedclaimmapping.go | 3 +- .../config/v1/usernameclaimmapping.go | 4 +- .../config/v1/vaultapproleauthentication.go | 30 + .../config/v1/vaultauthentication.go | 43 + .../config/v1/vaultconfigmapreference.go | 28 + .../config/v1/vaultkmsconfig.go | 125 +++ .../config/v1/vaultsecretreference.go | 28 + .../config/v1/vaulttlsconfig.go | 58 ++ .../v1alpha1/alertmanagercustomconfig.go | 2 +- .../config/v1alpha1/clustermonitoringspec.go | 60 ++ .../config/v1alpha1/containerresource.go | 7 + .../config/v1alpha1/metricsserverconfig.go | 2 +- .../config/v1alpha1/monitoringpluginconfig.go | 112 +++ .../nodeexportercollectorbuddyinfoconfig.go | 37 + .../v1alpha1/nodeexportercollectorconfig.go | 169 ++++ .../nodeexportercollectorcpufreqconfig.go | 36 + .../nodeexportercollectorethtoolconfig.go | 36 + .../nodeexportercollectorksmdconfig.go | 37 + .../nodeexportercollectormountstatsconfig.go | 38 + ...eexportercollectornetclasscollectconfig.go | 36 + .../nodeexportercollectornetclassconfig.go | 53 ++ .../nodeexportercollectornetdevconfig.go | 37 + .../nodeexportercollectorprocessesconfig.go | 37 + ...deexportercollectorsystemdcollectconfig.go | 40 + .../nodeexportercollectorsystemdconfig.go | 55 ++ .../nodeexportercollectortcpstatconfig.go | 36 + .../config/v1alpha1/nodeexporterconfig.go | 158 ++++ .../v1alpha1/openshiftstatemetricsconfig.go | 2 +- .../config/v1alpha1/prometheusconfig.go | 10 +- ...rometheusoperatoradmissionwebhookconfig.go | 2 +- .../v1alpha1/prometheusoperatorconfig.go | 2 +- .../config/v1alpha1/telemeterclientconfig.go | 118 +++ .../config/v1alpha1/thanosquerierconfig.go | 117 +++ .../applyconfigurations/internal/internal.go | 375 ++++++++- .../config/applyconfigurations/utils.go | 48 +- .../applyconfigurations/internal/internal.go | 3 + .../machine/v1beta1/machinesetstatus.go | 13 + .../applyconfigurations/internal/internal.go | 56 ++ .../operator/v1/awscsidriverconfigspec.go | 6 + .../operator/v1/bgpmanagedconfig.go | 46 ++ 
.../v1/ingresscontrollertuningoptions.go | 35 + .../operator/v1/nooverlayconfig.go | 50 ++ .../operator/v1/ovnkubernetesconfig.go | 45 ++ vendor/modules.txt | 5 +- 119 files changed, 7436 insertions(+), 409 deletions(-) create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-CustomNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-Default.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-DevPreviewNoUpgrade.crd.yaml rename vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/{0000_10_config-operator_01_dnses.crd.yaml => 0000_10_config-operator_01_dnses-OKD.crd.yaml} (91%) create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-TechPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/etcd/v1/Makefile create mode 100644 vendor/github.com/openshift/api/etcd/v1/doc.go create mode 100644 vendor/github.com/openshift/api/etcd/v1/register.go create mode 100644 vendor/github.com/openshift/api/etcd/v1/types_pacemakercluster.go create mode 100644 vendor/github.com/openshift/api/etcd/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/api/etcd/v1/zz_generated.featuregated-crd-manifests.yaml create mode 100644 vendor/github.com/openshift/api/etcd/v1/zz_generated.swagger_doc_generated.go delete mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awskmsconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultapproleauthentication.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultauthentication.go create mode 100644 
vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultconfigmapreference.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultkmsconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultsecretreference.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaulttlsconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/monitoringpluginconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorbuddyinfoconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorcpufreqconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorethtoolconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorksmdconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectormountstatsconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectornetclasscollectconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectornetclassconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectornetdevconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorprocessesconfig.go create mode 100644 
vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorsystemdcollectconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorsystemdconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectortcpstatconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexporterconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/telemeterclientconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/thanosquerierconfig.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/bgpmanagedconfig.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nooverlayconfig.go diff --git a/go.mod b/go.mod index 2e4dabd405..5abf5b6080 100644 --- a/go.mod +++ b/go.mod @@ -20,8 +20,8 @@ require ( github.com/onsi/ginkgo/v2 v2.28.1 github.com/onsi/gomega v1.39.1 github.com/openshift-eng/openshift-tests-extension v0.0.0-20260127124016-0fed2b824818 - github.com/openshift/api v0.0.0-20260317165824-54a3998d81eb - github.com/openshift/client-go v0.0.0-20260317180604-743f664b82d1 + github.com/openshift/api v0.0.0-20260429211050-21ed4c20b122 + github.com/openshift/client-go v0.0.0-20260429123927-c81f86abfa6a github.com/openshift/cluster-api-actuator-pkg/testutils v0.0.0-20250910145856-21d03d30056d github.com/openshift/cluster-control-plane-machine-set-operator v0.0.0-20251029084908-344babe6a957 github.com/openshift/controller-runtime-common v0.0.0-20260318085703-1812aed6dbd2 diff --git a/go.sum b/go.sum index c0efc5c97b..2e4ba19229 100644 --- a/go.sum +++ b/go.sum @@ -463,10 +463,10 @@ github.com/opencontainers/selinux v1.13.0 
h1:Zza88GWezyT7RLql12URvoxsbLfjFx988+L github.com/opencontainers/selinux v1.13.0/go.mod h1:XxWTed+A/s5NNq4GmYScVy+9jzXhGBVEOAyucdRUY8s= github.com/openshift-eng/openshift-tests-extension v0.0.0-20260127124016-0fed2b824818 h1:jJLE/aCAqDf8U4wc3bE1IEKgIxbb0ICjCNVFA49x/8s= github.com/openshift-eng/openshift-tests-extension v0.0.0-20260127124016-0fed2b824818/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M= -github.com/openshift/api v0.0.0-20260317165824-54a3998d81eb h1:iwBR3mzmyE3EMFx7R3CQ9lOccTS0dNht8TW82aGITg0= -github.com/openshift/api v0.0.0-20260317165824-54a3998d81eb/go.mod h1:pyVjK0nZ4sRs4fuQVQ4rubsJdahI1PB94LnQ8sGdvxo= -github.com/openshift/client-go v0.0.0-20260317180604-743f664b82d1 h1:Hr/R38eg5ZJXfbiaHumjJIN1buDZwhsm4ys4npVCXH0= -github.com/openshift/client-go v0.0.0-20260317180604-743f664b82d1/go.mod h1:Za51LlH76ALiQ/aKGBYJXmyJNkA//IDJ+I///30CA2M= +github.com/openshift/api v0.0.0-20260429211050-21ed4c20b122 h1:Lsw4zRFaiYSDpTHaC5V7vDGLJsroJl62HpkZv4AFQfQ= +github.com/openshift/api v0.0.0-20260429211050-21ed4c20b122/go.mod h1:pyVjK0nZ4sRs4fuQVQ4rubsJdahI1PB94LnQ8sGdvxo= +github.com/openshift/client-go v0.0.0-20260429123927-c81f86abfa6a h1:4GR6seHvlfv0rADe+LCQx63FqSExx6gaSo8uNiyWq+c= +github.com/openshift/client-go v0.0.0-20260429123927-c81f86abfa6a/go.mod h1:Lm7X7aYbAaKhGsNhgYaowP7hiLKwfN/w0r+Q6VlQoI8= github.com/openshift/cluster-api-actuator-pkg/testutils v0.0.0-20250910145856-21d03d30056d h1:+sqUThLi/lmgT5/scmmjnS6+RZFtbdxRAscNfCPyLPI= github.com/openshift/cluster-api-actuator-pkg/testutils v0.0.0-20250910145856-21d03d30056d/go.mod h1:9+FWWWLkVrnBo1eYhA/0Ehlq5JMgIAHtcB0IF+qV1AA= github.com/openshift/cluster-control-plane-machine-set-operator v0.0.0-20251029084908-344babe6a957 h1:eVnkMTFnirnoUOlAUT3Hy8WriIi1JoSrilWym3Dl8Q4= diff --git a/vendor/github.com/openshift/api/.golangci.yaml b/vendor/github.com/openshift/api/.golangci.yaml index 608fb0ed2e..53c9b4009e 100644 --- a/vendor/github.com/openshift/api/.golangci.yaml +++ 
b/vendor/github.com/openshift/api/.golangci.yaml @@ -111,6 +111,12 @@ linters: # Silence norefs lint for `Ref` field in ClusterAPI as it refers to an OCI image reference, not a kube object reference. path: operator/v1alpha1/types_clusterapi.go text: "noreferences: naming convention \"no-references\": field ClusterAPIInstallerComponentImage.Ref: field names should not contain reference-related words" + - linters: + - kubeapilinter + # PacemakerCluster intentionally marks Conditions as required with XValidation rules + # to enforce specific condition types are always present. + path: etcd/v1/types_pacemakercluster.go + text: "conditions: Conditions field in (PacemakerClusterStatus|PacemakerClusterNodeStatus|PacemakerClusterFencingAgentStatus|PacemakerClusterResourceStatus) is missing the following markers: optional" - linters: - kubeapilinter path: features|payload-command/*.go diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go index 3e17ca0ccb..e7106ef7ab 100644 --- a/vendor/github.com/openshift/api/config/v1/types.go +++ b/vendor/github.com/openshift/api/config/v1/types.go @@ -284,7 +284,12 @@ type ClientConnectionOverrides struct { } // GenericControllerConfig provides information to configure a controller +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 type GenericControllerConfig struct { + metav1.TypeMeta `json:",inline"` + // servingInfo is the HTTP serving information for the controller's endpoints ServingInfo HTTPServingInfo `json:"servingInfo"` diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index b8a4399dbc..b92a04ed06 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -209,7 +209,6 @@ type APIServerNamedServingCert struct { } // APIServerEncryption is used to encrypt sensitive resources on the cluster. -// +openshift:validation:FeatureGateAwareXValidation:featureGate=KMSEncryptionProvider,rule="has(self.type) && self.type == 'KMS' ? has(self.kms) : !has(self.kms)",message="kms config is required when encryption type is KMS, and forbidden otherwise" // +union type APIServerEncryption struct { // type defines what encryption type should be used to encrypt resources at the datastore layer. @@ -238,14 +237,13 @@ type APIServerEncryption struct { // managing the lifecyle of the encryption keys outside of the control plane. // This allows integration with an external provider to manage the data encryption keys securely. 
// - // +openshift:enable:FeatureGate=KMSEncryptionProvider + // +openshift:enable:FeatureGate=KMSEncryption // +unionMember // +optional KMS *KMSConfig `json:"kms,omitempty"` } // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";identity;aescbc;aesgcm -// +openshift:validation:FeatureGateAwareEnum:featureGate=KMSEncryptionProvider,enum="";identity;aescbc;aesgcm;KMS // +openshift:validation:FeatureGateAwareEnum:featureGate=KMSEncryption,enum="";identity;aescbc;aesgcm;KMS type EncryptionType string diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index 64d0f399b0..1a036bbb67 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -618,6 +618,7 @@ type OIDCClientReference struct { // +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC,rule="has(self.claim)",message="claim is required" // +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUIDAndExtraClaimMappings,rule="has(self.claim)",message="claim is required" // +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="has(self.claim) ? !has(self.expression) : has(self.expression)",message="precisely one of claim or expression must be set" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="has(self.expression) && size(self.expression) > 0 ? !has(self.prefixPolicy) || self.prefixPolicy != 'Prefix' : true",message="prefixPolicy must not be set to 'Prefix' when expression is set" type UsernameClaimMapping struct { // claim is an optional field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. // claim is required when the ExternalOIDCWithUpstreamParity feature gate is not enabled. 
@@ -650,11 +651,9 @@ type UsernameClaimMapping struct { // Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). // // When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. - // // The prefix field must be set when prefixPolicy is 'Prefix'. - // + // Must not be set to 'Prefix' when expression is set. // When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. - // // When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. // Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. // @@ -684,7 +683,7 @@ type UsernameClaimMapping struct { // +enum type UsernamePrefixPolicy string -var ( +const ( // NoOpinion let's the cluster assign prefixes. If the username claim is email, there is no prefix // If the username claim is anything else, it is prefixed by the issuerURL NoOpinion UsernamePrefixPolicy = "" @@ -710,12 +709,14 @@ type UsernamePrefix struct { // PrefixedClaimMapping configures a claim mapping // that allows for an optional prefix. +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDCWithUpstreamParity,rule="has(self.expression) && size(self.expression) > 0 ? (!has(self.prefix) || size(self.prefix) == 0) : true",message="prefix must not be set to a non-empty value when expression is set" type PrefixedClaimMapping struct { TokenClaimMapping `json:",inline"` // prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. // - // When omitted (""), no prefix is applied to the cluster identity attribute. + // When omitted or set to an empty string (""), no prefix is applied to the cluster identity attribute. 
+ // Must not be set to a non-empty value when expression is set. // // Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". // @@ -734,10 +735,10 @@ type TokenValidationRuleType string const ( // TokenValidationRuleTypeRequiredClaim indicates that the token must contain a specific claim. // Used as a value for TokenValidationRuleType. - TokenValidationRuleTypeRequiredClaim = "RequiredClaim" + TokenValidationRuleTypeRequiredClaim TokenValidationRuleType = "RequiredClaim" // TokenValidationRuleTypeCEL indicates that the token validation is defined via a CEL expression. // Used as a value for TokenValidationRuleType. - TokenValidationRuleTypeCEL = "CEL" + TokenValidationRuleTypeCEL TokenValidationRuleType = "CEL" ) // TokenClaimValidationRule represents a validation rule based on token claims. diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go index 06eb75ccf7..efbdc3ae54 100644 --- a/vendor/github.com/openshift/api/config/v1/types_dns.go +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -134,7 +134,14 @@ type AWSDNSSpec struct { // privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing // operations on the cluster's private hosted zone specified in the cluster DNS config. // When left empty, no role should be assumed. - // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$` + // + // The ARN must follow the format: arn::iam:::role/, where: + // is the AWS partition (aws, aws-cn, aws-us-gov, or aws-eusc), + // is a 12-digit numeric identifier for the AWS account, + // is the IAM role name. 
+ // + // +openshift:validation:FeatureGateAwareXValidation:featureGate="",rule=`matches(self, '^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role/.*$')`,message=`privateZoneIAMRole must be a valid AWS IAM role ARN in the format: arn::iam:::role/` + // +openshift:validation:FeatureGateAwareXValidation:featureGate=AWSEuropeanSovereignCloudInstall,rule=`matches(self, '^arn:(aws|aws-cn|aws-us-gov|aws-eusc):iam::[0-9]{12}:role/.*$')`,message=`privateZoneIAMRole must be a valid AWS IAM role ARN in the format: arn::iam:::role/` // +optional PrivateZoneIAMRole string `json:"privateZoneIAMRole"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 369ba1e7a0..c579be3a11 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -102,11 +102,11 @@ type InfrastructureStatus struct { // and the operators should not configure the operand for highly-available operation // The 'External' mode indicates that the control plane is hosted externally to the cluster and that // its components are not visible within the cluster. + // The 'HighlyAvailableArbiter' mode indicates that the control plane will consist of 2 control-plane nodes + // that run conventional services and 1 smaller sized arbiter node that runs a bare minimum of services to maintain quorum. 
// +kubebuilder:default=HighlyAvailable - // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=HighlyAvailable;SingleReplica;External - // +openshift:validation:FeatureGateAwareEnum:featureGate=HighlyAvailableArbiter,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;External - // +openshift:validation:FeatureGateAwareEnum:featureGate=DualReplica,enum=HighlyAvailable;SingleReplica;DualReplica;External - // +openshift:validation:FeatureGateAwareEnum:requiredFeatureGate=HighlyAvailableArbiter;DualReplica,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;DualReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate=DualReplica,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;DualReplica;External // +optional ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"` @@ -787,7 +787,6 @@ type GCPPlatformStatus struct { // // +default={"dnsType": "PlatformDefault"} // +kubebuilder:default={"dnsType": "PlatformDefault"} - // +openshift:enable:FeatureGate=GCPClusterHostedDNSInstall // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` diff --git a/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go b/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go index 3293204fa4..7ab8f782bb 100644 --- a/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go +++ b/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go @@ -1,55 +1,265 @@ package v1 // KMSConfig defines the configuration for the KMS instance -// that will be used with KMSEncryptionProvider encryption -// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'AWS' ? 
has(self.aws) : !has(self.aws)",message="aws config is required when kms provider type is AWS, and forbidden otherwise" +// that will be used with KMS encryption +// +kubebuilder:validation:XValidation:rule="self.type == 'Vault' ? has(self.vault) : !has(self.vault)",message="vault config is required when kms provider type is Vault, and forbidden otherwise" // +union type KMSConfig struct { // type defines the kind of platform for the KMS provider. - // Available provider types are AWS only. + // Allowed values are Vault. + // When set to Vault, the plugin connects to a HashiCorp Vault server for key management. // // +unionDiscriminator // +required Type KMSProviderType `json:"type"` - // aws defines the key config for using an AWS KMS instance - // for the encryption. The AWS KMS instance is managed + // vault defines the configuration for the Vault KMS plugin. + // The plugin connects to a Vault Enterprise server that is managed // by the user outside the purview of the control plane. + // This field must be set when type is Vault, and must be unset otherwise. // // +unionMember // +optional - AWS *AWSKMSConfig `json:"aws,omitempty"` + Vault VaultKMSConfig `json:"vault,omitempty,omitzero"` + + // --- TOMBSTONE --- + // aws was a field that allowed configuring AWS KMS. + // It was never implemented and has been removed. + // The field name is reserved to prevent reuse. + // + // +optional + // AWS *AWSKMSConfig `json:"aws,omitempty"` } -// AWSKMSConfig defines the KMS config specific to AWS KMS provider -type AWSKMSConfig struct { - // keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption. - // The value must adhere to the format `arn:aws:kms:::key/`, where: - // - `` is the AWS region consisting of lowercase letters and hyphens followed by a number. - // - `` is a 12-digit numeric identifier for the AWS account. - // - `` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens. 
+// --- TOMBSTONE --- +// AWSKMSConfig was a type for AWS KMS configuration that was never implemented. +// The type name is reserved to prevent reuse. +// +// type AWSKMSConfig struct { +// KeyARN string `json:"keyARN"` +// Region string `json:"region"` +// } + +// KMSProviderType is a specific supported KMS provider +// +kubebuilder:validation:Enum=Vault +type KMSProviderType string + +const ( + // VaultKMSProvider represents a supported KMS provider for use with HashiCorp Vault + VaultKMSProvider KMSProviderType = "Vault" + + // --- TOMBSTONE --- + // AWSKMSProvider was a constant for AWS KMS support that was never implemented. + // The constant name is reserved to prevent reuse. + // + // AWSKMSProvider KMSProviderType = "AWS" +) + +// VaultSecretReference references a secret in the openshift-config namespace. +type VaultSecretReference struct { + // name is the metadata.name of the referenced secret in the openshift-config namespace. + // The name must be a valid DNS subdomain name: it must contain no more than 253 characters, + // contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. // - // +kubebuilder:validation:MaxLength=128 // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:XValidation:rule="self.matches('^arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key/[a-f0-9-]+$')",message="keyARN must follow the format `arn:aws:kms:::key/`. The account ID must be a 12 digit number and the region and key ID should consist only of lowercase hexadecimal characters and hyphens (-)." 
+ // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="name must be a valid DNS subdomain name: contain no more than 253 characters, contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character" // +required - KeyARN string `json:"keyARN"` - // region specifies the AWS region where the KMS instance exists, and follows the format - // `--`, e.g.: `us-east-1`. - // Only lowercase letters and hyphens followed by numbers are allowed. + Name string `json:"name,omitempty"` +} + +// VaultConfigMapReference references a ConfigMap in the openshift-config namespace. +type VaultConfigMapReference struct { + // name is the metadata.name of the referenced ConfigMap in the openshift-config namespace. + // The name must be a valid DNS subdomain name: it must contain no more than 253 characters, + // contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. // - // +kubebuilder:validation:MaxLength=64 // +kubebuilder:validation:MinLength=1 - // +kubebuilder:validation:XValidation:rule="self.matches('^[a-z0-9]+(-[a-z0-9]+)*$')",message="region must be a valid AWS region, consisting of lowercase characters, digits and hyphens (-) only." + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="name must be a valid DNS subdomain name: contain no more than 253 characters, contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character" // +required - Region string `json:"region"` + Name string `json:"name,omitempty"` } -// KMSProviderType is a specific supported KMS provider -// +kubebuilder:validation:Enum=AWS -type KMSProviderType string +// VaultAuthentication defines the authentication method used to authenticate with Vault. 
+// +kubebuilder:validation:XValidation:rule="self.type == 'AppRole' ? has(self.appRole) : !has(self.appRole)",message="appRole config is required when authentication type is AppRole, and forbidden otherwise" +// +union +type VaultAuthentication struct { + // type defines the authentication method used to authenticate with Vault. + // Allowed values are AppRole. + // When set to AppRole, the plugin uses AppRole credentials to authenticate with Vault. + // + // +unionDiscriminator + // +required + Type VaultAuthenticationType `json:"type,omitempty"` + + // appRole defines the configuration for AppRole authentication. + // This field must be set when type is AppRole, and must be unset otherwise. + // + // +unionMember + // +optional + AppRole VaultAppRoleAuthentication `json:"appRole,omitzero"` +} + +// VaultAuthenticationType defines the authentication method type for Vault. +// +kubebuilder:validation:Enum=AppRole +type VaultAuthenticationType string const ( - // AWSKMSProvider represents a supported KMS provider for use with AWS KMS - AWSKMSProvider KMSProviderType = "AWS" + // VaultAuthenticationTypeAppRole represents AppRole authentication method. + VaultAuthenticationTypeAppRole VaultAuthenticationType = "AppRole" ) + +// VaultAppRoleAuthentication defines the configuration for AppRole authentication with Vault. +type VaultAppRoleAuthentication struct { + // secret references a secret in the openshift-config namespace containing + // the AppRole credentials used to authenticate with Vault. + // The secret must contain two keys: "roleID" for the AppRole Role ID and "secretID" for the AppRole Secret ID. + // + // The namespace for the secret is openshift-config. + // + // +required + Secret VaultSecretReference `json:"secret,omitzero"` +} + +// VaultKMSConfig defines the KMS plugin configuration specific to Vault KMS +type VaultKMSConfig struct { + // kmsPluginImage specifies the container image for the HashiCorp Vault KMS plugin. 
+ // + // The image must be a fully qualified OCI image pull spec with a SHA256 digest. + // The format is: host[:port][/namespace]/name@sha256: + // where the digest must be 64 characters long and consist only of lowercase hexadecimal characters, a-f and 0-9. + // The total length must be between 75 and 447 characters. + // + // Short names (e.g., "vault-plugin" or "hashicorp/vault-plugin") are not allowed. + // The registry hostname must be included and must contain at least one dot. + // Image tags (e.g., ":latest", ":v1.0.0") are not allowed. + // + // Consult the OpenShift documentation for compatible plugin versions with your cluster version, + // then obtain the image digest for that version from HashiCorp's container registry. + // + // For disconnected environments, mirror the plugin image to an accessible registry + // and reference the mirrored location with its digest. + // + // +kubebuilder:validation:MinLength=75 + // +kubebuilder:validation:MaxLength=447 + // +kubebuilder:validation:XValidation:rule=`(self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$'))`,message="the OCI Image reference must end with a valid '@sha256:' suffix, where '' is 64 characters long" + // +kubebuilder:validation:XValidation:rule=`(self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?(/[a-zA-Z0-9-_.]+)+$'))`,message="the OCI Image name should follow the host[:port][/namespace]/name format, resembling a valid URL without the scheme. Short names are not allowed, the registry hostname must be included." + // +required + KMSPluginImage string `json:"kmsPluginImage,omitempty"` + + // vaultAddress specifies the address of the HashiCorp Vault instance. + // The value must be a valid HTTPS URL containing only scheme, host, and optional port. + // Paths, user info, query parameters, and fragments are not allowed. 
+ // + // Format: https://hostname[:port] + // Example: https://vault.example.com:8200 + // + // The value must be between 1 and 512 characters. + // + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="must be a valid URL" + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getScheme() == 'https'",message="must use the 'https' scheme" + // +kubebuilder:validation:XValidation:rule="isURL(self) && (url(self).getEscapedPath() == '' || url(self).getEscapedPath() == '/')",message="must not contain a path" + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getQuery() == {}",message="must not have a query" + // +kubebuilder:validation:XValidation:rule="self.find('#(.+)$') == ''",message="must not have a fragment" + // +kubebuilder:validation:XValidation:rule="self.find('@') == ''",message="must not have user info" + // +kubebuilder:validation:MaxLength=512 + // +kubebuilder:validation:MinLength=1 + // +required + VaultAddress string `json:"vaultAddress,omitempty"` + + // vaultNamespace specifies the Vault namespace where the Transit secrets engine is mounted. + // This is only applicable for Vault Enterprise installations. + // When this field is not set, no namespace is used. + // + // The value must be between 1 and 4096 characters. + // The namespace cannot end with a forward slash, cannot contain spaces, and cannot be one of the reserved strings: root, sys, audit, auth, cubbyhole, or identity. 
+ // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=4096 + // +kubebuilder:validation:XValidation:rule="!self.endsWith('/')",message="vaultNamespace cannot end with a forward slash" + // +kubebuilder:validation:XValidation:rule="!self.contains(' ')",message="vaultNamespace cannot contain spaces" + // +kubebuilder:validation:XValidation:rule="!(self in ['root', 'sys', 'audit', 'auth', 'cubbyhole', 'identity'])",message="vaultNamespace cannot be a reserved string (root, sys, audit, auth, cubbyhole, identity)" + // +optional + VaultNamespace string `json:"vaultNamespace,omitempty"` + + // tls contains the TLS configuration for connecting to the Vault server. + // When this field is not set, system default TLS settings are used. + // +optional + TLS VaultTLSConfig `json:"tls,omitzero"` + + // authentication defines the authentication method used to authenticate with Vault. + // + // +required + Authentication VaultAuthentication `json:"authentication,omitzero"` + + // transitMount specifies the mount path of the Vault Transit engine. + // The value must be between 1 and 1024 characters when specified. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose a reasonable default. These defaults are subject to change over time. + // The current default is "transit". + // + // The mount path cannot start or end with a forward slash, cannot contain spaces, + // and cannot contain consecutive forward slashes. 
+ // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:XValidation:rule="!self.startsWith('/')",message="transitMount cannot start with a forward slash" + // +kubebuilder:validation:XValidation:rule="!self.endsWith('/')",message="transitMount cannot end with a forward slash" + // +kubebuilder:validation:XValidation:rule="!self.contains(' ')",message="transitMount cannot contain spaces" + // +kubebuilder:validation:XValidation:rule="!self.contains('//')",message="transitMount cannot contain consecutive forward slashes" + // +optional + TransitMount string `json:"transitMount,omitempty"` + + // transitKey specifies the name of the encryption key in Vault's Transit engine. + // This key is used to encrypt and decrypt data. + // + // The key name must be between 1 and 512 characters and cannot contain spaces or forward slashes. + // + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=512 + // +kubebuilder:validation:XValidation:rule="!self.contains(' ')",message="transitKey cannot contain spaces" + // +kubebuilder:validation:XValidation:rule="!self.contains('/')",message="transitKey cannot contain forward slashes" + // +required + TransitKey string `json:"transitKey,omitempty"` +} + +// VaultTLSConfig contains TLS configuration for connecting to Vault. +// +kubebuilder:validation:MinProperties=1 +type VaultTLSConfig struct { + // caBundle references a ConfigMap in the openshift-config namespace containing + // the CA certificate bundle used to verify the TLS connection to the Vault server. + // The ConfigMap must contain the CA bundle in the key "ca-bundle.crt". + // When this field is not set, the system's trusted CA certificates are used. + // + // The namespace for the ConfigMap is openshift-config. 
+ // + // Example ConfigMap: + // apiVersion: v1 + // kind: ConfigMap + // metadata: + // name: vault-ca-bundle + // namespace: openshift-config + // data: + // ca-bundle.crt: | + // -----BEGIN CERTIFICATE----- + // ... + // -----END CERTIFICATE----- + // + // +optional + CABundle VaultConfigMapReference `json:"caBundle,omitzero"` + + // serverName specifies the Server Name Indication (SNI) to use when connecting to Vault via TLS. + // This is useful when the Vault server's hostname doesn't match its TLS certificate. + // When this field is not set, the hostname from vaultAddress is used for SNI. + // + // The value must be a valid DNS hostname: it must contain no more than 253 characters, + // contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. + // + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="serverName must be a valid DNS hostname: contain no more than 253 characters, contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character" + // +optional + ServerName string `json:"serverName,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-CustomNoUpgrade.crd.yaml index d2ba7fc325..003fef92be 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-CustomNoUpgrade.crd.yaml @@ -168,59 +168,267 @@ spec: managing the lifecyle of the encryption keys outside of the control plane. 
This allows integration with an external provider to manage the data encryption keys securely. properties: - aws: + type: + description: |- + type defines the kind of platform for the KMS provider. + Allowed values are Vault. + When set to Vault, the plugin connects to a HashiCorp Vault server for key management. + enum: + - Vault + type: string + vault: description: |- - aws defines the key config for using an AWS KMS instance - for the encryption. The AWS KMS instance is managed + vault defines the configuration for the Vault KMS plugin. + The plugin connects to a Vault Enterprise server that is managed by the user outside the purview of the control plane. + This field must be set when type is Vault, and must be unset otherwise. properties: - keyARN: + authentication: + description: authentication defines the authentication + method used to authenticate with Vault. + properties: + appRole: + description: |- + appRole defines the configuration for AppRole authentication. + This field must be set when type is AppRole, and must be unset otherwise. + properties: + secret: + description: |- + secret references a secret in the openshift-config namespace containing + the AppRole credentials used to authenticate with Vault. + The secret must contain two keys: "roleID" for the AppRole Role ID and "secretID" for the AppRole Secret ID. + + The namespace for the secret is openshift-config. + properties: + name: + description: |- + name is the metadata.name of the referenced secret in the openshift-config namespace. + The name must be a valid DNS subdomain name: it must contain no more than 253 characters, + contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. 
+ maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: 'name must be a valid DNS subdomain + name: contain no more than 253 characters, + contain only lowercase alphanumeric characters, + ''-'' or ''.'', and start and end with + an alphanumeric character' + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + required: + - secret + type: object + type: + description: |- + type defines the authentication method used to authenticate with Vault. + Allowed values are AppRole. + When set to AppRole, the plugin uses AppRole credentials to authenticate with Vault. + enum: + - AppRole + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: appRole config is required when authentication + type is AppRole, and forbidden otherwise + rule: 'self.type == ''AppRole'' ? has(self.appRole) + : !has(self.appRole)' + kmsPluginImage: description: |- - keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption. - The value must adhere to the format `arn:aws:kms:::key/`, where: - - `` is the AWS region consisting of lowercase letters and hyphens followed by a number. - - `` is a 12-digit numeric identifier for the AWS account. - - `` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens. - maxLength: 128 + kmsPluginImage specifies the container image for the HashiCorp Vault KMS plugin. + + The image must be a fully qualified OCI image pull spec with a SHA256 digest. + The format is: host[:port][/namespace]/name@sha256: + where the digest must be 64 characters long and consist only of lowercase hexadecimal characters, a-f and 0-9. + The total length must be between 75 and 447 characters. + + Short names (e.g., "vault-plugin" or "hashicorp/vault-plugin") are not allowed. + The registry hostname must be included and must contain at least one dot. + Image tags (e.g., ":latest", ":v1.0.0") are not allowed. 
+ + Consult the OpenShift documentation for compatible plugin versions with your cluster version, + then obtain the image digest for that version from HashiCorp's container registry. + + For disconnected environments, mirror the plugin image to an accessible registry + and reference the mirrored location with its digest. + maxLength: 447 + minLength: 75 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid + '@sha256:' suffix, where '' is 64 + characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. + Short names are not allowed, the registry hostname + must be included. + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?(/[a-zA-Z0-9-_.]+)+$')) + tls: + description: |- + tls contains the TLS configuration for connecting to the Vault server. + When this field is not set, system default TLS settings are used. + minProperties: 1 + properties: + caBundle: + description: |- + caBundle references a ConfigMap in the openshift-config namespace containing + the CA certificate bundle used to verify the TLS connection to the Vault server. + The ConfigMap must contain the CA bundle in the key "ca-bundle.crt". + When this field is not set, the system's trusted CA certificates are used. + + The namespace for the ConfigMap is openshift-config. + + Example ConfigMap: + apiVersion: v1 + kind: ConfigMap + metadata: + name: vault-ca-bundle + namespace: openshift-config + data: + ca-bundle.crt: | + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- + properties: + name: + description: |- + name is the metadata.name of the referenced ConfigMap in the openshift-config namespace. 
+ The name must be a valid DNS subdomain name: it must contain no more than 253 characters, + contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: 'name must be a valid DNS subdomain + name: contain no more than 253 characters, + contain only lowercase alphanumeric characters, + ''-'' or ''.'', and start and end with an + alphanumeric character' + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + serverName: + description: |- + serverName specifies the Server Name Indication (SNI) to use when connecting to Vault via TLS. + This is useful when the Vault server's hostname doesn't match its TLS certificate. + When this field is not set, the hostname from vaultAddress is used for SNI. + + The value must be a valid DNS hostname: it must contain no more than 253 characters, + contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: 'serverName must be a valid DNS hostname: + contain no more than 253 characters, contain only + lowercase alphanumeric characters, ''-'' or ''.'', + and start and end with an alphanumeric character' + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + type: object + transitKey: + description: |- + transitKey specifies the name of the encryption key in Vault's Transit engine. + This key is used to encrypt and decrypt data. + + The key name must be between 1 and 512 characters and cannot contain spaces or forward slashes. + maxLength: 512 minLength: 1 type: string x-kubernetes-validations: - - message: keyARN must follow the format `arn:aws:kms:::key/`. - The account ID must be a 12 digit number and the region - and key ID should consist only of lowercase hexadecimal - characters and hyphens (-). 
- rule: self.matches('^arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key/[a-f0-9-]+$') - region: + - message: transitKey cannot contain spaces + rule: '!self.contains('' '')' + - message: transitKey cannot contain forward slashes + rule: '!self.contains(''/'')' + transitMount: description: |- - region specifies the AWS region where the KMS instance exists, and follows the format - `--`, e.g.: `us-east-1`. - Only lowercase letters and hyphens followed by numbers are allowed. - maxLength: 64 + transitMount specifies the mount path of the Vault Transit engine. + The value must be between 1 and 1024 characters when specified. + + When omitted, this means the user has no opinion and the platform is left + to choose a reasonable default. These defaults are subject to change over time. + The current default is "transit". + + The mount path cannot start or end with a forward slash, cannot contain spaces, + and cannot contain consecutive forward slashes. + maxLength: 1024 + minLength: 1 + type: string + x-kubernetes-validations: + - message: transitMount cannot start with a forward slash + rule: '!self.startsWith(''/'')' + - message: transitMount cannot end with a forward slash + rule: '!self.endsWith(''/'')' + - message: transitMount cannot contain spaces + rule: '!self.contains('' '')' + - message: transitMount cannot contain consecutive forward + slashes + rule: '!self.contains(''//'')' + vaultAddress: + description: |- + vaultAddress specifies the address of the HashiCorp Vault instance. + The value must be a valid HTTPS URL containing only scheme, host, and optional port. + Paths, user info, query parameters, and fragments are not allowed. + + Format: https://hostname[:port] + Example: https://vault.example.com:8200 + + The value must be between 1 and 512 characters. 
+ maxLength: 512 + minLength: 1 + type: string + x-kubernetes-validations: + - message: must be a valid URL + rule: isURL(self) + - message: must use the 'https' scheme + rule: isURL(self) && url(self).getScheme() == 'https' + - message: must not contain a path + rule: isURL(self) && (url(self).getEscapedPath() == + '' || url(self).getEscapedPath() == '/') + - message: must not have a query + rule: isURL(self) && url(self).getQuery() == {} + - message: must not have a fragment + rule: self.find('#(.+)$') == '' + - message: must not have user info + rule: self.find('@') == '' + vaultNamespace: + description: |- + vaultNamespace specifies the Vault namespace where the Transit secrets engine is mounted. + This is only applicable for Vault Enterprise installations. + When this field is not set, no namespace is used. + + The value must be between 1 and 4096 characters. + The namespace cannot end with a forward slash, cannot contain spaces, and cannot be one of the reserved strings: root, sys, audit, auth, cubbyhole, or identity. + maxLength: 4096 minLength: 1 type: string x-kubernetes-validations: - - message: region must be a valid AWS region, consisting - of lowercase characters, digits and hyphens (-) only. - rule: self.matches('^[a-z0-9]+(-[a-z0-9]+)*$') + - message: vaultNamespace cannot end with a forward slash + rule: '!self.endsWith(''/'')' + - message: vaultNamespace cannot contain spaces + rule: '!self.contains('' '')' + - message: vaultNamespace cannot be a reserved string + (root, sys, audit, auth, cubbyhole, identity) + rule: '!(self in [''root'', ''sys'', ''audit'', ''auth'', + ''cubbyhole'', ''identity''])' required: - - keyARN - - region + - authentication + - kmsPluginImage + - transitKey + - vaultAddress type: object - type: - description: |- - type defines the kind of platform for the KMS provider. - Available provider types are AWS only. 
- enum: - - AWS - type: string required: - type type: object x-kubernetes-validations: - - message: aws config is required when kms provider type is AWS, - and forbidden otherwise - rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) - : !has(self.aws)' + - message: vault config is required when kms provider type is + Vault, and forbidden otherwise + rule: 'self.type == ''Vault'' ? has(self.vault) : !has(self.vault)' type: description: |- type defines what encryption type should be used to encrypt resources at the datastore layer. @@ -244,11 +452,6 @@ spec: - KMS type: string type: object - x-kubernetes-validations: - - message: kms config is required when encryption type is KMS, and - forbidden otherwise - rule: 'has(self.type) && self.type == ''KMS'' ? has(self.kms) : - !has(self.kms)' servingCerts: description: |- servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-DevPreviewNoUpgrade.crd.yaml index cabbd04bb7..8a70a13a31 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-DevPreviewNoUpgrade.crd.yaml @@ -168,59 +168,267 @@ spec: managing the lifecyle of the encryption keys outside of the control plane. This allows integration with an external provider to manage the data encryption keys securely. properties: - aws: + type: + description: |- + type defines the kind of platform for the KMS provider. + Allowed values are Vault. + When set to Vault, the plugin connects to a HashiCorp Vault server for key management. 
+ enum: + - Vault + type: string + vault: description: |- - aws defines the key config for using an AWS KMS instance - for the encryption. The AWS KMS instance is managed + vault defines the configuration for the Vault KMS plugin. + The plugin connects to a Vault Enterprise server that is managed by the user outside the purview of the control plane. + This field must be set when type is Vault, and must be unset otherwise. properties: - keyARN: + authentication: + description: authentication defines the authentication + method used to authenticate with Vault. + properties: + appRole: + description: |- + appRole defines the configuration for AppRole authentication. + This field must be set when type is AppRole, and must be unset otherwise. + properties: + secret: + description: |- + secret references a secret in the openshift-config namespace containing + the AppRole credentials used to authenticate with Vault. + The secret must contain two keys: "roleID" for the AppRole Role ID and "secretID" for the AppRole Secret ID. + + The namespace for the secret is openshift-config. + properties: + name: + description: |- + name is the metadata.name of the referenced secret in the openshift-config namespace. + The name must be a valid DNS subdomain name: it must contain no more than 253 characters, + contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: 'name must be a valid DNS subdomain + name: contain no more than 253 characters, + contain only lowercase alphanumeric characters, + ''-'' or ''.'', and start and end with + an alphanumeric character' + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + required: + - secret + type: object + type: + description: |- + type defines the authentication method used to authenticate with Vault. + Allowed values are AppRole. 
+ When set to AppRole, the plugin uses AppRole credentials to authenticate with Vault. + enum: + - AppRole + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: appRole config is required when authentication + type is AppRole, and forbidden otherwise + rule: 'self.type == ''AppRole'' ? has(self.appRole) + : !has(self.appRole)' + kmsPluginImage: description: |- - keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption. - The value must adhere to the format `arn:aws:kms:::key/`, where: - - `` is the AWS region consisting of lowercase letters and hyphens followed by a number. - - `` is a 12-digit numeric identifier for the AWS account. - - `` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens. - maxLength: 128 + kmsPluginImage specifies the container image for the HashiCorp Vault KMS plugin. + + The image must be a fully qualified OCI image pull spec with a SHA256 digest. + The format is: host[:port][/namespace]/name@sha256: + where the digest must be 64 characters long and consist only of lowercase hexadecimal characters, a-f and 0-9. + The total length must be between 75 and 447 characters. + + Short names (e.g., "vault-plugin" or "hashicorp/vault-plugin") are not allowed. + The registry hostname must be included and must contain at least one dot. + Image tags (e.g., ":latest", ":v1.0.0") are not allowed. + + Consult the OpenShift documentation for compatible plugin versions with your cluster version, + then obtain the image digest for that version from HashiCorp's container registry. + + For disconnected environments, mirror the plugin image to an accessible registry + and reference the mirrored location with its digest. 
+ maxLength: 447 + minLength: 75 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid + '@sha256:' suffix, where '' is 64 + characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. + Short names are not allowed, the registry hostname + must be included. + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?(/[a-zA-Z0-9-_.]+)+$')) + tls: + description: |- + tls contains the TLS configuration for connecting to the Vault server. + When this field is not set, system default TLS settings are used. + minProperties: 1 + properties: + caBundle: + description: |- + caBundle references a ConfigMap in the openshift-config namespace containing + the CA certificate bundle used to verify the TLS connection to the Vault server. + The ConfigMap must contain the CA bundle in the key "ca-bundle.crt". + When this field is not set, the system's trusted CA certificates are used. + + The namespace for the ConfigMap is openshift-config. + + Example ConfigMap: + apiVersion: v1 + kind: ConfigMap + metadata: + name: vault-ca-bundle + namespace: openshift-config + data: + ca-bundle.crt: | + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- + properties: + name: + description: |- + name is the metadata.name of the referenced ConfigMap in the openshift-config namespace. + The name must be a valid DNS subdomain name: it must contain no more than 253 characters, + contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. 
+ maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: 'name must be a valid DNS subdomain + name: contain no more than 253 characters, + contain only lowercase alphanumeric characters, + ''-'' or ''.'', and start and end with an + alphanumeric character' + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + serverName: + description: |- + serverName specifies the Server Name Indication (SNI) to use when connecting to Vault via TLS. + This is useful when the Vault server's hostname doesn't match its TLS certificate. + When this field is not set, the hostname from vaultAddress is used for SNI. + + The value must be a valid DNS hostname: it must contain no more than 253 characters, + contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: 'serverName must be a valid DNS hostname: + contain no more than 253 characters, contain only + lowercase alphanumeric characters, ''-'' or ''.'', + and start and end with an alphanumeric character' + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + type: object + transitKey: + description: |- + transitKey specifies the name of the encryption key in Vault's Transit engine. + This key is used to encrypt and decrypt data. + + The key name must be between 1 and 512 characters and cannot contain spaces or forward slashes. + maxLength: 512 minLength: 1 type: string x-kubernetes-validations: - - message: keyARN must follow the format `arn:aws:kms:::key/`. - The account ID must be a 12 digit number and the region - and key ID should consist only of lowercase hexadecimal - characters and hyphens (-). 
- rule: self.matches('^arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key/[a-f0-9-]+$') - region: + - message: transitKey cannot contain spaces + rule: '!self.contains('' '')' + - message: transitKey cannot contain forward slashes + rule: '!self.contains(''/'')' + transitMount: description: |- - region specifies the AWS region where the KMS instance exists, and follows the format - `--`, e.g.: `us-east-1`. - Only lowercase letters and hyphens followed by numbers are allowed. - maxLength: 64 + transitMount specifies the mount path of the Vault Transit engine. + The value must be between 1 and 1024 characters when specified. + + When omitted, this means the user has no opinion and the platform is left + to choose a reasonable default. These defaults are subject to change over time. + The current default is "transit". + + The mount path cannot start or end with a forward slash, cannot contain spaces, + and cannot contain consecutive forward slashes. + maxLength: 1024 + minLength: 1 + type: string + x-kubernetes-validations: + - message: transitMount cannot start with a forward slash + rule: '!self.startsWith(''/'')' + - message: transitMount cannot end with a forward slash + rule: '!self.endsWith(''/'')' + - message: transitMount cannot contain spaces + rule: '!self.contains('' '')' + - message: transitMount cannot contain consecutive forward + slashes + rule: '!self.contains(''//'')' + vaultAddress: + description: |- + vaultAddress specifies the address of the HashiCorp Vault instance. + The value must be a valid HTTPS URL containing only scheme, host, and optional port. + Paths, user info, query parameters, and fragments are not allowed. + + Format: https://hostname[:port] + Example: https://vault.example.com:8200 + + The value must be between 1 and 512 characters. 
+ maxLength: 512 + minLength: 1 + type: string + x-kubernetes-validations: + - message: must be a valid URL + rule: isURL(self) + - message: must use the 'https' scheme + rule: isURL(self) && url(self).getScheme() == 'https' + - message: must not contain a path + rule: isURL(self) && (url(self).getEscapedPath() == + '' || url(self).getEscapedPath() == '/') + - message: must not have a query + rule: isURL(self) && url(self).getQuery() == {} + - message: must not have a fragment + rule: self.find('#(.+)$') == '' + - message: must not have user info + rule: self.find('@') == '' + vaultNamespace: + description: |- + vaultNamespace specifies the Vault namespace where the Transit secrets engine is mounted. + This is only applicable for Vault Enterprise installations. + When this field is not set, no namespace is used. + + The value must be between 1 and 4096 characters. + The namespace cannot end with a forward slash, cannot contain spaces, and cannot be one of the reserved strings: root, sys, audit, auth, cubbyhole, or identity. + maxLength: 4096 minLength: 1 type: string x-kubernetes-validations: - - message: region must be a valid AWS region, consisting - of lowercase characters, digits and hyphens (-) only. - rule: self.matches('^[a-z0-9]+(-[a-z0-9]+)*$') + - message: vaultNamespace cannot end with a forward slash + rule: '!self.endsWith(''/'')' + - message: vaultNamespace cannot contain spaces + rule: '!self.contains('' '')' + - message: vaultNamespace cannot be a reserved string + (root, sys, audit, auth, cubbyhole, identity) + rule: '!(self in [''root'', ''sys'', ''audit'', ''auth'', + ''cubbyhole'', ''identity''])' required: - - keyARN - - region + - authentication + - kmsPluginImage + - transitKey + - vaultAddress type: object - type: - description: |- - type defines the kind of platform for the KMS provider. - Available provider types are AWS only. 
- enum: - - AWS - type: string required: - type type: object x-kubernetes-validations: - - message: aws config is required when kms provider type is AWS, - and forbidden otherwise - rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) - : !has(self.aws)' + - message: vault config is required when kms provider type is + Vault, and forbidden otherwise + rule: 'self.type == ''Vault'' ? has(self.vault) : !has(self.vault)' type: description: |- type defines what encryption type should be used to encrypt resources at the datastore layer. @@ -244,11 +452,6 @@ spec: - KMS type: string type: object - x-kubernetes-validations: - - message: kms config is required when encryption type is KMS, and - forbidden otherwise - rule: 'has(self.type) && self.type == ''KMS'' ? has(self.kms) : - !has(self.kms)' servingCerts: description: |- servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-TechPreviewNoUpgrade.crd.yaml index b21c31dd43..9cfd4f6217 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_apiservers-TechPreviewNoUpgrade.crd.yaml @@ -158,6 +158,277 @@ spec: description: encryption allows the configuration of encryption of resources at the datastore layer. properties: + kms: + description: |- + kms defines the configuration for the external KMS instance that manages the encryption keys, + when KMS encryption is enabled sensitive resources will be encrypted using keys managed by an + externally configured KMS instance. 
+ + The Key Management Service (KMS) instance provides symmetric encryption and is responsible for + managing the lifecyle of the encryption keys outside of the control plane. + This allows integration with an external provider to manage the data encryption keys securely. + properties: + type: + description: |- + type defines the kind of platform for the KMS provider. + Allowed values are Vault. + When set to Vault, the plugin connects to a HashiCorp Vault server for key management. + enum: + - Vault + type: string + vault: + description: |- + vault defines the configuration for the Vault KMS plugin. + The plugin connects to a Vault Enterprise server that is managed + by the user outside the purview of the control plane. + This field must be set when type is Vault, and must be unset otherwise. + properties: + authentication: + description: authentication defines the authentication + method used to authenticate with Vault. + properties: + appRole: + description: |- + appRole defines the configuration for AppRole authentication. + This field must be set when type is AppRole, and must be unset otherwise. + properties: + secret: + description: |- + secret references a secret in the openshift-config namespace containing + the AppRole credentials used to authenticate with Vault. + The secret must contain two keys: "roleID" for the AppRole Role ID and "secretID" for the AppRole Secret ID. + + The namespace for the secret is openshift-config. + properties: + name: + description: |- + name is the metadata.name of the referenced secret in the openshift-config namespace. + The name must be a valid DNS subdomain name: it must contain no more than 253 characters, + contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. 
+ maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: 'name must be a valid DNS subdomain + name: contain no more than 253 characters, + contain only lowercase alphanumeric characters, + ''-'' or ''.'', and start and end with + an alphanumeric character' + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + required: + - secret + type: object + type: + description: |- + type defines the authentication method used to authenticate with Vault. + Allowed values are AppRole. + When set to AppRole, the plugin uses AppRole credentials to authenticate with Vault. + enum: + - AppRole + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: appRole config is required when authentication + type is AppRole, and forbidden otherwise + rule: 'self.type == ''AppRole'' ? has(self.appRole) + : !has(self.appRole)' + kmsPluginImage: + description: |- + kmsPluginImage specifies the container image for the HashiCorp Vault KMS plugin. + + The image must be a fully qualified OCI image pull spec with a SHA256 digest. + The format is: host[:port][/namespace]/name@sha256: + where the digest must be 64 characters long and consist only of lowercase hexadecimal characters, a-f and 0-9. + The total length must be between 75 and 447 characters. + + Short names (e.g., "vault-plugin" or "hashicorp/vault-plugin") are not allowed. + The registry hostname must be included and must contain at least one dot. + Image tags (e.g., ":latest", ":v1.0.0") are not allowed. + + Consult the OpenShift documentation for compatible plugin versions with your cluster version, + then obtain the image digest for that version from HashiCorp's container registry. + + For disconnected environments, mirror the plugin image to an accessible registry + and reference the mirrored location with its digest. 
+ maxLength: 447 + minLength: 75 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid + '@sha256:' suffix, where '' is 64 + characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. + Short names are not allowed, the registry hostname + must be included. + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?(/[a-zA-Z0-9-_.]+)+$')) + tls: + description: |- + tls contains the TLS configuration for connecting to the Vault server. + When this field is not set, system default TLS settings are used. + minProperties: 1 + properties: + caBundle: + description: |- + caBundle references a ConfigMap in the openshift-config namespace containing + the CA certificate bundle used to verify the TLS connection to the Vault server. + The ConfigMap must contain the CA bundle in the key "ca-bundle.crt". + When this field is not set, the system's trusted CA certificates are used. + + The namespace for the ConfigMap is openshift-config. + + Example ConfigMap: + apiVersion: v1 + kind: ConfigMap + metadata: + name: vault-ca-bundle + namespace: openshift-config + data: + ca-bundle.crt: | + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- + properties: + name: + description: |- + name is the metadata.name of the referenced ConfigMap in the openshift-config namespace. + The name must be a valid DNS subdomain name: it must contain no more than 253 characters, + contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. 
+ maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: 'name must be a valid DNS subdomain + name: contain no more than 253 characters, + contain only lowercase alphanumeric characters, + ''-'' or ''.'', and start and end with an + alphanumeric character' + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + required: + - name + type: object + serverName: + description: |- + serverName specifies the Server Name Indication (SNI) to use when connecting to Vault via TLS. + This is useful when the Vault server's hostname doesn't match its TLS certificate. + When this field is not set, the hostname from vaultAddress is used for SNI. + + The value must be a valid DNS hostname: it must contain no more than 253 characters, + contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. + maxLength: 253 + minLength: 1 + type: string + x-kubernetes-validations: + - message: 'serverName must be a valid DNS hostname: + contain no more than 253 characters, contain only + lowercase alphanumeric characters, ''-'' or ''.'', + and start and end with an alphanumeric character' + rule: '!format.dns1123Subdomain().validate(self).hasValue()' + type: object + transitKey: + description: |- + transitKey specifies the name of the encryption key in Vault's Transit engine. + This key is used to encrypt and decrypt data. + + The key name must be between 1 and 512 characters and cannot contain spaces or forward slashes. + maxLength: 512 + minLength: 1 + type: string + x-kubernetes-validations: + - message: transitKey cannot contain spaces + rule: '!self.contains('' '')' + - message: transitKey cannot contain forward slashes + rule: '!self.contains(''/'')' + transitMount: + description: |- + transitMount specifies the mount path of the Vault Transit engine. + The value must be between 1 and 1024 characters when specified. 
+ + When omitted, this means the user has no opinion and the platform is left + to choose a reasonable default. These defaults are subject to change over time. + The current default is "transit". + + The mount path cannot start or end with a forward slash, cannot contain spaces, + and cannot contain consecutive forward slashes. + maxLength: 1024 + minLength: 1 + type: string + x-kubernetes-validations: + - message: transitMount cannot start with a forward slash + rule: '!self.startsWith(''/'')' + - message: transitMount cannot end with a forward slash + rule: '!self.endsWith(''/'')' + - message: transitMount cannot contain spaces + rule: '!self.contains('' '')' + - message: transitMount cannot contain consecutive forward + slashes + rule: '!self.contains(''//'')' + vaultAddress: + description: |- + vaultAddress specifies the address of the HashiCorp Vault instance. + The value must be a valid HTTPS URL containing only scheme, host, and optional port. + Paths, user info, query parameters, and fragments are not allowed. + + Format: https://hostname[:port] + Example: https://vault.example.com:8200 + + The value must be between 1 and 512 characters. + maxLength: 512 + minLength: 1 + type: string + x-kubernetes-validations: + - message: must be a valid URL + rule: isURL(self) + - message: must use the 'https' scheme + rule: isURL(self) && url(self).getScheme() == 'https' + - message: must not contain a path + rule: isURL(self) && (url(self).getEscapedPath() == + '' || url(self).getEscapedPath() == '/') + - message: must not have a query + rule: isURL(self) && url(self).getQuery() == {} + - message: must not have a fragment + rule: self.find('#(.+)$') == '' + - message: must not have user info + rule: self.find('@') == '' + vaultNamespace: + description: |- + vaultNamespace specifies the Vault namespace where the Transit secrets engine is mounted. + This is only applicable for Vault Enterprise installations. + When this field is not set, no namespace is used. 
+ + The value must be between 1 and 4096 characters. + The namespace cannot end with a forward slash, cannot contain spaces, and cannot be one of the reserved strings: root, sys, audit, auth, cubbyhole, or identity. + maxLength: 4096 + minLength: 1 + type: string + x-kubernetes-validations: + - message: vaultNamespace cannot end with a forward slash + rule: '!self.endsWith(''/'')' + - message: vaultNamespace cannot contain spaces + rule: '!self.contains('' '')' + - message: vaultNamespace cannot be a reserved string + (root, sys, audit, auth, cubbyhole, identity) + rule: '!(self in [''root'', ''sys'', ''audit'', ''auth'', + ''cubbyhole'', ''identity''])' + required: + - authentication + - kmsPluginImage + - transitKey + - vaultAddress + type: object + required: + - type + type: object + x-kubernetes-validations: + - message: vault config is required when kms provider type is + Vault, and forbidden otherwise + rule: 'self.type == ''Vault'' ? has(self.vault) : !has(self.vault)' type: description: |- type defines what encryption type should be used to encrypt resources at the datastore layer. diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-CustomNoUpgrade.crd.yaml index b4b8eb306f..cd737e2727 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-CustomNoUpgrade.crd.yaml @@ -212,12 +212,18 @@ spec: description: |- prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. 
- When omitted (""), no prefix is applied to the cluster identity attribute. + When omitted or set to an empty string (""), no prefix is applied to the cluster identity attribute. + Must not be set to a non-empty value when expression is set. Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". type: string type: object x-kubernetes-validations: + - message: prefix must not be set to a non-empty value when + expression is set + rule: 'has(self.expression) && size(self.expression) > + 0 ? (!has(self.prefix) || size(self.prefix) == 0) : + true' - message: expression must not be set if claim is specified and is not an empty string rule: '(size(self.?claim.orValue("")) > 0) ? !has(self.expression) @@ -316,11 +322,9 @@ spec: Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. - The prefix field must be set when prefixPolicy is 'Prefix'. - + Must not be set to 'Prefix' when expression is set. When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. - When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. @@ -341,6 +345,11 @@ spec: - message: precisely one of claim or expression must be set rule: 'has(self.claim) ? !has(self.expression) : has(self.expression)' + - message: prefixPolicy must not be set to 'Prefix' when + expression is set + rule: 'has(self.expression) && size(self.expression) > + 0 ? 
!has(self.prefixPolicy) || self.prefixPolicy != + ''Prefix'' : true' - message: prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise rule: 'has(self.prefixPolicy) && self.prefixPolicy == diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Default.crd.yaml index d1e44f6ced..5e6be8db9f 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-Default.crd.yaml @@ -199,7 +199,8 @@ spec: description: |- prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. - When omitted (""), no prefix is applied to the cluster identity attribute. + When omitted or set to an empty string (""), no prefix is applied to the cluster identity attribute. + Must not be set to a non-empty value when expression is set. Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". type: string @@ -288,11 +289,9 @@ spec: Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. - The prefix field must be set when prefixPolicy is 'Prefix'. - + Must not be set to 'Prefix' when expression is set. When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. 
- When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-DevPreviewNoUpgrade.crd.yaml index c46017aa3d..bf116984ff 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-DevPreviewNoUpgrade.crd.yaml @@ -212,12 +212,18 @@ spec: description: |- prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. - When omitted (""), no prefix is applied to the cluster identity attribute. + When omitted or set to an empty string (""), no prefix is applied to the cluster identity attribute. + Must not be set to a non-empty value when expression is set. Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". type: string type: object x-kubernetes-validations: + - message: prefix must not be set to a non-empty value when + expression is set + rule: 'has(self.expression) && size(self.expression) > + 0 ? (!has(self.prefix) || size(self.prefix) == 0) : + true' - message: expression must not be set if claim is specified and is not an empty string rule: '(size(self.?claim.orValue("")) > 0) ? 
!has(self.expression) @@ -316,11 +322,9 @@ spec: Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. - The prefix field must be set when prefixPolicy is 'Prefix'. - + Must not be set to 'Prefix' when expression is set. When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. - When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. @@ -341,6 +345,11 @@ spec: - message: precisely one of claim or expression must be set rule: 'has(self.claim) ? !has(self.expression) : has(self.expression)' + - message: prefixPolicy must not be set to 'Prefix' when + expression is set + rule: 'has(self.expression) && size(self.expression) > + 0 ? !has(self.prefixPolicy) || self.prefixPolicy != + ''Prefix'' : true' - message: prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise rule: 'has(self.prefixPolicy) && self.prefixPolicy == diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-OKD.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-OKD.crd.yaml index 5b5956b1fb..dcfe61e693 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-OKD.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-OKD.crd.yaml @@ -199,7 +199,8 @@ spec: description: |- prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity 
attributes. - When omitted (""), no prefix is applied to the cluster identity attribute. + When omitted or set to an empty string (""), no prefix is applied to the cluster identity attribute. + Must not be set to a non-empty value when expression is set. Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". type: string @@ -288,11 +289,9 @@ spec: Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. - The prefix field must be set when prefixPolicy is 'Prefix'. - + Must not be set to 'Prefix' when expression is set. When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. - When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-TechPreviewNoUpgrade.crd.yaml index 9da4945a7f..de0dd293a8 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-TechPreviewNoUpgrade.crd.yaml @@ -212,12 +212,18 @@ spec: description: |- prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. 
- When omitted (""), no prefix is applied to the cluster identity attribute. + When omitted or set to an empty string (""), no prefix is applied to the cluster identity attribute. + Must not be set to a non-empty value when expression is set. Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". type: string type: object x-kubernetes-validations: + - message: prefix must not be set to a non-empty value when + expression is set + rule: 'has(self.expression) && size(self.expression) > + 0 ? (!has(self.prefix) || size(self.prefix) == 0) : + true' - message: expression must not be set if claim is specified and is not an empty string rule: '(size(self.?claim.orValue("")) > 0) ? !has(self.expression) @@ -316,11 +322,9 @@ spec: Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. - The prefix field must be set when prefixPolicy is 'Prefix'. - + Must not be set to 'Prefix' when expression is set. When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. - When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. @@ -341,6 +345,11 @@ spec: - message: precisely one of claim or expression must be set rule: 'has(self.claim) ? !has(self.expression) : has(self.expression)' + - message: prefixPolicy must not be set to 'Prefix' when + expression is set + rule: 'has(self.expression) && size(self.expression) > + 0 ? 
!has(self.prefixPolicy) || self.prefixPolicy != + ''Prefix'' : true' - message: prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise rule: 'has(self.prefixPolicy) && self.prefixPolicy == diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..05d56f63cf --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-CustomNoUpgrade.crd.yaml @@ -0,0 +1,198 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/bootstrap-required: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: dnses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + DNS holds cluster-wide information about DNS. The canonical name is `cluster` + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + baseDomain: + description: |- + baseDomain is the base domain of the cluster. All managed DNS records will + be sub-domains of this base. + + For example, given the base domain `openshift.example.com`, an API server + DNS record may be created for `cluster-api.openshift.example.com`. + + Once set, this field cannot be changed. + type: string + platform: + description: |- + platform holds configuration specific to the underlying + infrastructure provider for DNS. + When omitted, this means the user has no opinion and the platform is left + to choose reasonable defaults. These defaults are subject to change over time. + properties: + aws: + description: aws contains DNS configuration specific to the Amazon + Web Services cloud provider. + properties: + privateZoneIAMRole: + description: |- + privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing + operations on the cluster's private hosted zone specified in the cluster DNS config. + When left empty, no role should be assumed. + + The ARN must follow the format: arn::iam:::role/, where: + is the AWS partition (aws, aws-cn, aws-us-gov, or aws-eusc), + is a 12-digit numeric identifier for the AWS account, + is the IAM role name. 
+ type: string + x-kubernetes-validations: + - message: 'privateZoneIAMRole must be a valid AWS IAM role + ARN in the format: arn::iam:::role/' + rule: matches(self, '^arn:(aws|aws-cn|aws-us-gov|aws-eusc):iam::[0-9]{12}:role/.*$') + type: object + type: + description: |- + type is the underlying infrastructure provider for the cluster. + Allowed values: "", "AWS". + + Individual components may not support all platforms, + and must handle unrecognized platforms with best-effort defaults. + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + x-kubernetes-validations: + - message: allowed values are '' and 'AWS' + rule: self in ['','AWS'] + required: + - type + type: object + x-kubernetes-validations: + - message: aws configuration is required when platform is AWS, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : + !has(self.aws)' + privateZone: + description: |- + privateZone is the location where all the DNS records that are only available internally + to the cluster exist. + + If this field is nil, no private records should be created. + + Once set, this field cannot be changed. + properties: + id: + description: |- + id is the identifier that can be used to find the DNS hosted zone. + + on AWS zone can be fetched using `ID` as id in [1] + on Azure zone can be fetched using `ID` as a pre-determined name in [2], + on GCP zone can be fetched using `ID` as a pre-determined name in [3]. 
+ + [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + type: string + tags: + additionalProperties: + type: string + description: |- + tags can be used to query the DNS hosted zone. + + on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + + [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + type: object + type: object + publicZone: + description: |- + publicZone is the location where all the DNS records that are publicly accessible to + the internet exist. + + If this field is nil, no public records should be created. + + Once set, this field cannot be changed. + properties: + id: + description: |- + id is the identifier that can be used to find the DNS hosted zone. + + on AWS zone can be fetched using `ID` as id in [1] + on Azure zone can be fetched using `ID` as a pre-determined name in [2], + on GCP zone can be fetched using `ID` as a pre-determined name in [3]. + + [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + type: string + tags: + additionalProperties: + type: string + description: |- + tags can be used to query the DNS hosted zone. + + on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + + [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + type: object + type: object + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-Default.crd.yaml new file mode 100644 index 0000000000..93954dcc36 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-Default.crd.yaml @@ -0,0 +1,198 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/bootstrap-required: "true" + release.openshift.io/feature-set: Default + name: dnses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + DNS holds cluster-wide information about DNS. The canonical name is `cluster` + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. 
+ Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + baseDomain: + description: |- + baseDomain is the base domain of the cluster. All managed DNS records will + be sub-domains of this base. + + For example, given the base domain `openshift.example.com`, an API server + DNS record may be created for `cluster-api.openshift.example.com`. + + Once set, this field cannot be changed. + type: string + platform: + description: |- + platform holds configuration specific to the underlying + infrastructure provider for DNS. + When omitted, this means the user has no opinion and the platform is left + to choose reasonable defaults. These defaults are subject to change over time. + properties: + aws: + description: aws contains DNS configuration specific to the Amazon + Web Services cloud provider. + properties: + privateZoneIAMRole: + description: |- + privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing + operations on the cluster's private hosted zone specified in the cluster DNS config. + When left empty, no role should be assumed. + + The ARN must follow the format: arn::iam:::role/, where: + is the AWS partition (aws, aws-cn, aws-us-gov, or aws-eusc), + is a 12-digit numeric identifier for the AWS account, + is the IAM role name. + type: string + x-kubernetes-validations: + - message: 'privateZoneIAMRole must be a valid AWS IAM role + ARN in the format: arn::iam:::role/' + rule: matches(self, '^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role/.*$') + type: object + type: + description: |- + type is the underlying infrastructure provider for the cluster. + Allowed values: "", "AWS". + + Individual components may not support all platforms, + and must handle unrecognized platforms with best-effort defaults. 
+ enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + x-kubernetes-validations: + - message: allowed values are '' and 'AWS' + rule: self in ['','AWS'] + required: + - type + type: object + x-kubernetes-validations: + - message: aws configuration is required when platform is AWS, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : + !has(self.aws)' + privateZone: + description: |- + privateZone is the location where all the DNS records that are only available internally + to the cluster exist. + + If this field is nil, no private records should be created. + + Once set, this field cannot be changed. + properties: + id: + description: |- + id is the identifier that can be used to find the DNS hosted zone. + + on AWS zone can be fetched using `ID` as id in [1] + on Azure zone can be fetched using `ID` as a pre-determined name in [2], + on GCP zone can be fetched using `ID` as a pre-determined name in [3]. + + [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + type: string + tags: + additionalProperties: + type: string + description: |- + tags can be used to query the DNS hosted zone. + + on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + + [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + type: object + type: object + publicZone: + description: |- + publicZone is the location where all the DNS records that are publicly accessible to + the internet exist. + + If this field is nil, no public records should be created. 
+ + Once set, this field cannot be changed. + properties: + id: + description: |- + id is the identifier that can be used to find the DNS hosted zone. + + on AWS zone can be fetched using `ID` as id in [1] + on Azure zone can be fetched using `ID` as a pre-determined name in [2], + on GCP zone can be fetched using `ID` as a pre-determined name in [3]. + + [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + type: string + tags: + additionalProperties: + type: string + description: |- + tags can be used to query the DNS hosted zone. + + on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + + [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + type: object + type: object + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..f2d9157713 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,198 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/bootstrap-required: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: dnses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + DNS holds cluster-wide information about DNS. The canonical name is `cluster` + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + baseDomain: + description: |- + baseDomain is the base domain of the cluster. All managed DNS records will + be sub-domains of this base. + + For example, given the base domain `openshift.example.com`, an API server + DNS record may be created for `cluster-api.openshift.example.com`. + + Once set, this field cannot be changed. + type: string + platform: + description: |- + platform holds configuration specific to the underlying + infrastructure provider for DNS. + When omitted, this means the user has no opinion and the platform is left + to choose reasonable defaults. These defaults are subject to change over time. + properties: + aws: + description: aws contains DNS configuration specific to the Amazon + Web Services cloud provider. + properties: + privateZoneIAMRole: + description: |- + privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing + operations on the cluster's private hosted zone specified in the cluster DNS config. + When left empty, no role should be assumed. + + The ARN must follow the format: arn::iam:::role/, where: + is the AWS partition (aws, aws-cn, aws-us-gov, or aws-eusc), + is a 12-digit numeric identifier for the AWS account, + is the IAM role name. + type: string + x-kubernetes-validations: + - message: 'privateZoneIAMRole must be a valid AWS IAM role + ARN in the format: arn::iam:::role/' + rule: matches(self, '^arn:(aws|aws-cn|aws-us-gov|aws-eusc):iam::[0-9]{12}:role/.*$') + type: object + type: + description: |- + type is the underlying infrastructure provider for the cluster. + Allowed values: "", "AWS". 
+ + Individual components may not support all platforms, + and must handle unrecognized platforms with best-effort defaults. + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + x-kubernetes-validations: + - message: allowed values are '' and 'AWS' + rule: self in ['','AWS'] + required: + - type + type: object + x-kubernetes-validations: + - message: aws configuration is required when platform is AWS, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : + !has(self.aws)' + privateZone: + description: |- + privateZone is the location where all the DNS records that are only available internally + to the cluster exist. + + If this field is nil, no private records should be created. + + Once set, this field cannot be changed. + properties: + id: + description: |- + id is the identifier that can be used to find the DNS hosted zone. + + on AWS zone can be fetched using `ID` as id in [1] + on Azure zone can be fetched using `ID` as a pre-determined name in [2], + on GCP zone can be fetched using `ID` as a pre-determined name in [3]. + + [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + type: string + tags: + additionalProperties: + type: string + description: |- + tags can be used to query the DNS hosted zone. 
+ + on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + + [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + type: object + type: object + publicZone: + description: |- + publicZone is the location where all the DNS records that are publicly accessible to + the internet exist. + + If this field is nil, no public records should be created. + + Once set, this field cannot be changed. + properties: + id: + description: |- + id is the identifier that can be used to find the DNS hosted zone. + + on AWS zone can be fetched using `ID` as id in [1] + on Azure zone can be fetched using `ID` as a pre-determined name in [2], + on GCP zone can be fetched using `ID` as a pre-determined name in [3]. + + [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + type: string + tags: + additionalProperties: + type: string + description: |- + tags can be used to query the DNS hosted zone. + + on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + + [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + type: object + type: object + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-OKD.crd.yaml similarity index 91% rename from vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses.crd.yaml rename to vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-OKD.crd.yaml index 06fb0be0b2..0631512bca 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-OKD.crd.yaml @@ -7,6 +7,7 @@ metadata: include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" release.openshift.io/bootstrap-required: "true" + release.openshift.io/feature-set: OKD name: dnses.config.openshift.io spec: group: config.openshift.io @@ -71,8 +72,16 @@ spec: privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed. - pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ + + The ARN must follow the format: arn::iam:::role/, where: + is the AWS partition (aws, aws-cn, aws-us-gov, or aws-eusc), + is a 12-digit numeric identifier for the AWS account, + is the IAM role name. 
type: string + x-kubernetes-validations: + - message: 'privateZoneIAMRole must be a valid AWS IAM role + ARN in the format: arn::iam:::role/' + rule: matches(self, '^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role/.*$') type: object type: description: |- diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..ce4e9b77f0 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_dnses-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,198 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/bootstrap-required: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: dnses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + DNS holds cluster-wide information about DNS. The canonical name is `cluster` + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + baseDomain: + description: |- + baseDomain is the base domain of the cluster. All managed DNS records will + be sub-domains of this base. + + For example, given the base domain `openshift.example.com`, an API server + DNS record may be created for `cluster-api.openshift.example.com`. + + Once set, this field cannot be changed. + type: string + platform: + description: |- + platform holds configuration specific to the underlying + infrastructure provider for DNS. + When omitted, this means the user has no opinion and the platform is left + to choose reasonable defaults. These defaults are subject to change over time. + properties: + aws: + description: aws contains DNS configuration specific to the Amazon + Web Services cloud provider. + properties: + privateZoneIAMRole: + description: |- + privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing + operations on the cluster's private hosted zone specified in the cluster DNS config. + When left empty, no role should be assumed. + + The ARN must follow the format: arn::iam:::role/, where: + is the AWS partition (aws, aws-cn, aws-us-gov, or aws-eusc), + is a 12-digit numeric identifier for the AWS account, + is the IAM role name. 
+ type: string + x-kubernetes-validations: + - message: 'privateZoneIAMRole must be a valid AWS IAM role + ARN in the format: arn::iam:::role/' + rule: matches(self, '^arn:(aws|aws-cn|aws-us-gov|aws-eusc):iam::[0-9]{12}:role/.*$') + type: object + type: + description: |- + type is the underlying infrastructure provider for the cluster. + Allowed values: "", "AWS". + + Individual components may not support all platforms, + and must handle unrecognized platforms with best-effort defaults. + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + x-kubernetes-validations: + - message: allowed values are '' and 'AWS' + rule: self in ['','AWS'] + required: + - type + type: object + x-kubernetes-validations: + - message: aws configuration is required when platform is AWS, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : + !has(self.aws)' + privateZone: + description: |- + privateZone is the location where all the DNS records that are only available internally + to the cluster exist. + + If this field is nil, no private records should be created. + + Once set, this field cannot be changed. + properties: + id: + description: |- + id is the identifier that can be used to find the DNS hosted zone. + + on AWS zone can be fetched using `ID` as id in [1] + on Azure zone can be fetched using `ID` as a pre-determined name in [2], + on GCP zone can be fetched using `ID` as a pre-determined name in [3]. 
+ + [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + type: string + tags: + additionalProperties: + type: string + description: |- + tags can be used to query the DNS hosted zone. + + on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + + [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + type: object + type: object + publicZone: + description: |- + publicZone is the location where all the DNS records that are publicly accessible to + the internet exist. + + If this field is nil, no public records should be created. + + Once set, this field cannot be changed. + properties: + id: + description: |- + id is the identifier that can be used to find the DNS hosted zone. + + on AWS zone can be fetched using `ID` as id in [1] + on Azure zone can be fetched using `ID` as a pre-determined name in [2], + on GCP zone can be fetched using `ID` as a pre-determined name in [3]. + + [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + type: string + tags: + additionalProperties: + type: string + description: |- + tags can be used to query the DNS hosted zone. + + on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + + [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + type: object + type: object + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. 
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml index 9086d4a572..69e9d1108a 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml @@ -1127,6 +1127,8 @@ spec: and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. + The 'HighlyAvailableArbiter' mode indicates that the control plane will consist of 2 control-plane nodes + that run conventional services and 1 smaller sized arbiter node that runs a bare minimum of services to maintain quorum. 
enum: - HighlyAvailable - HighlyAvailableArbiter diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml index 803c48a1e2..8d94616b35 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml @@ -828,6 +828,17 @@ spec: - topology - zone type: object + x-kubernetes-validations: + - message: when zoneAffinity type is HostGroup, regionAffinity + type must be ComputeCluster + rule: 'has(self.zoneAffinity) && self.zoneAffinity.type + == ''HostGroup'' ? has(self.regionAffinity) && self.regionAffinity.type + == ''ComputeCluster'' : true' + - message: when zoneAffinity type is ComputeCluster, regionAffinity + type must be Datacenter + rule: 'has(self.zoneAffinity) && self.zoneAffinity.type + == ''ComputeCluster'' ? has(self.regionAffinity) && + self.regionAffinity.type == ''Datacenter'' : true' type: array x-kubernetes-list-map-keys: - name @@ -1050,10 +1061,13 @@ spec: and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. + The 'HighlyAvailableArbiter' mode indicates that the control plane will consist of 2 control-plane nodes + that run conventional services and 1 smaller sized arbiter node that runs a bare minimum of services to maintain quorum. 
enum: - HighlyAvailable - HighlyAvailableArbiter - SingleReplica + - DualReplica - External type: string cpuPartitioning: @@ -1259,6 +1273,109 @@ spec: description: armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack. type: string + cloudLoadBalancerConfig: + default: + dnsType: PlatformDefault + description: |- + cloudLoadBalancerConfig holds configuration related to DNS and cloud + load balancers. It allows configuration of in-cluster DNS as an alternative + to the platform default DNS implementation. + When using the ClusterHosted DNS type, Load Balancer IP addresses + must be provided for the API and internal API load balancers as well as the + ingress load balancer. + properties: + clusterHosted: + description: |- + clusterHosted holds the IP addresses of API, API-Int and Ingress Load + Balancers on Cloud Platforms. The DNS solution hosted within the cluster + use these IP addresses to provide resolution for API, API-Int and Ingress + services. + properties: + apiIntLoadBalancerIPs: + description: |- + apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the apiIntLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + apiLoadBalancerIPs: + description: |- + apiLoadBalancerIPs holds Load Balancer IPs for the API service. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Could be empty for private clusters. + Entries in the apiLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. 
+ format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + ingressLoadBalancerIPs: + description: |- + ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the ingressLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + type: object + dnsType: + default: PlatformDefault + description: |- + dnsType indicates the type of DNS solution in use within the cluster. Its default value of + `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform. + It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode, + the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. + The cluster's use of the cloud's Load Balancers is unaffected by this setting. + The value is immutable after it has been set at install time. + Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. + Enabling this functionality allows the user to start their own DNS solution outside the cluster after + installation is complete. The customer would be responsible for configuring this custom DNS solution, + and it can be run in addition to the in-cluster DNS solution. 
+ enum: + - ClusterHosted + - PlatformDefault + type: string + x-kubernetes-validations: + - message: dnsType is immutable + rule: oldSelf == '' || self == oldSelf + type: object + x-kubernetes-validations: + - message: clusterHosted is permitted only when dnsType is + ClusterHosted + rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' + ? !has(self.clusterHosted) : true' cloudName: description: |- cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml index de1a68c90a..7d1ecbc19b 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml @@ -1127,6 +1127,8 @@ spec: and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. + The 'HighlyAvailableArbiter' mode indicates that the control plane will consist of 2 control-plane nodes + that run conventional services and 1 smaller sized arbiter node that runs a bare minimum of services to maintain quorum. 
enum: - HighlyAvailable - HighlyAvailableArbiter diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-OKD.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-OKD.crd.yaml index 245bc3ea63..b107d7e44c 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-OKD.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-OKD.crd.yaml @@ -828,6 +828,17 @@ spec: - topology - zone type: object + x-kubernetes-validations: + - message: when zoneAffinity type is HostGroup, regionAffinity + type must be ComputeCluster + rule: 'has(self.zoneAffinity) && self.zoneAffinity.type + == ''HostGroup'' ? has(self.regionAffinity) && self.regionAffinity.type + == ''ComputeCluster'' : true' + - message: when zoneAffinity type is ComputeCluster, regionAffinity + type must be Datacenter + rule: 'has(self.zoneAffinity) && self.zoneAffinity.type + == ''ComputeCluster'' ? has(self.regionAffinity) && + self.regionAffinity.type == ''Datacenter'' : true' type: array x-kubernetes-list-map-keys: - name @@ -1050,10 +1061,13 @@ spec: and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. + The 'HighlyAvailableArbiter' mode indicates that the control plane will consist of 2 control-plane nodes + that run conventional services and 1 smaller sized arbiter node that runs a bare minimum of services to maintain quorum. 
enum: - HighlyAvailable - HighlyAvailableArbiter - SingleReplica + - DualReplica - External type: string cpuPartitioning: @@ -1259,6 +1273,109 @@ spec: description: armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack. type: string + cloudLoadBalancerConfig: + default: + dnsType: PlatformDefault + description: |- + cloudLoadBalancerConfig holds configuration related to DNS and cloud + load balancers. It allows configuration of in-cluster DNS as an alternative + to the platform default DNS implementation. + When using the ClusterHosted DNS type, Load Balancer IP addresses + must be provided for the API and internal API load balancers as well as the + ingress load balancer. + properties: + clusterHosted: + description: |- + clusterHosted holds the IP addresses of API, API-Int and Ingress Load + Balancers on Cloud Platforms. The DNS solution hosted within the cluster + use these IP addresses to provide resolution for API, API-Int and Ingress + services. + properties: + apiIntLoadBalancerIPs: + description: |- + apiIntLoadBalancerIPs holds Load Balancer IPs for the internal API service. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the apiIntLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + apiLoadBalancerIPs: + description: |- + apiLoadBalancerIPs holds Load Balancer IPs for the API service. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Could be empty for private clusters. + Entries in the apiLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. 
+ format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + ingressLoadBalancerIPs: + description: |- + ingressLoadBalancerIPs holds IPs for Ingress Load Balancers. + These Load Balancer IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the ingressLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + type: object + dnsType: + default: PlatformDefault + description: |- + dnsType indicates the type of DNS solution in use within the cluster. Its default value of + `PlatformDefault` indicates that the cluster's DNS is the default provided by the cloud platform. + It can be set to `ClusterHosted` to bypass the configuration of the cloud default DNS. In this mode, + the cluster needs to provide a self-hosted DNS solution for the cluster's installation to succeed. + The cluster's use of the cloud's Load Balancers is unaffected by this setting. + The value is immutable after it has been set at install time. + Currently, there is no way for the customer to add additional DNS entries into the cluster hosted DNS. + Enabling this functionality allows the user to start their own DNS solution outside the cluster after + installation is complete. The customer would be responsible for configuring this custom DNS solution, + and it can be run in addition to the in-cluster DNS solution. 
+ enum: + - ClusterHosted + - PlatformDefault + type: string + x-kubernetes-validations: + - message: dnsType is immutable + rule: oldSelf == '' || self == oldSelf + type: object + x-kubernetes-validations: + - message: clusterHosted is permitted only when dnsType is + ClusterHosted + rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' + ? !has(self.clusterHosted) : true' cloudName: description: |- cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml index c45b7d6e83..02f367409b 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml @@ -1127,6 +1127,8 @@ spec: and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. + The 'HighlyAvailableArbiter' mode indicates that the control plane will consist of 2 control-plane nodes + that run conventional services and 1 smaller sized arbiter node that runs a bare minimum of services to maintain quorum. 
enum: - HighlyAvailable - HighlyAvailableArbiter diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 30b85b78e9..84aae76e22 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -45,7 +45,7 @@ func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) { if in.KMS != nil { in, out := &in.KMS, &out.KMS *out = new(KMSConfig) - (*in).DeepCopyInto(*out) + **out = **in } return } @@ -216,22 +216,6 @@ func (in *AWSIngressSpec) DeepCopy() *AWSIngressSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSKMSConfig) DeepCopyInto(out *AWSKMSConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSConfig. -func (in *AWSKMSConfig) DeepCopy() *AWSKMSConfig { - if in == nil { - return nil - } - out := new(AWSKMSConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) { *out = *in @@ -2560,6 +2544,7 @@ func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GenericControllerConfig) DeepCopyInto(out *GenericControllerConfig) { *out = *in + out.TypeMeta = in.TypeMeta in.ServingInfo.DeepCopyInto(&out.ServingInfo) out.LeaderElection = in.LeaderElection out.Authentication = in.Authentication @@ -3832,11 +3817,7 @@ func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KMSConfig) DeepCopyInto(out *KMSConfig) { *out = *in - if in.AWS != nil { - in, out := &in.AWS, &out.AWS - *out = new(AWSKMSConfig) - **out = **in - } + out.Vault = in.Vault return } @@ -6901,6 +6882,107 @@ func (in *VSpherePlatformVCenterSpec) DeepCopy() *VSpherePlatformVCenterSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultAppRoleAuthentication) DeepCopyInto(out *VaultAppRoleAuthentication) { + *out = *in + out.Secret = in.Secret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultAppRoleAuthentication. +func (in *VaultAppRoleAuthentication) DeepCopy() *VaultAppRoleAuthentication { + if in == nil { + return nil + } + out := new(VaultAppRoleAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultAuthentication) DeepCopyInto(out *VaultAuthentication) { + *out = *in + out.AppRole = in.AppRole + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultAuthentication. +func (in *VaultAuthentication) DeepCopy() *VaultAuthentication { + if in == nil { + return nil + } + out := new(VaultAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultConfigMapReference) DeepCopyInto(out *VaultConfigMapReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultConfigMapReference. 
+func (in *VaultConfigMapReference) DeepCopy() *VaultConfigMapReference { + if in == nil { + return nil + } + out := new(VaultConfigMapReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultKMSConfig) DeepCopyInto(out *VaultKMSConfig) { + *out = *in + out.TLS = in.TLS + out.Authentication = in.Authentication + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultKMSConfig. +func (in *VaultKMSConfig) DeepCopy() *VaultKMSConfig { + if in == nil { + return nil + } + out := new(VaultKMSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultSecretReference) DeepCopyInto(out *VaultSecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultSecretReference. +func (in *VaultSecretReference) DeepCopy() *VaultSecretReference { + if in == nil { + return nil + } + out := new(VaultSecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultTLSConfig) DeepCopyInto(out *VaultTLSConfig) { + *out = *in + out.CABundle = in.CABundle + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultTLSConfig. +func (in *VaultTLSConfig) DeepCopy() *VaultTLSConfig { + if in == nil { + return nil + } + out := new(VaultTLSConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index 4b768c3898..75233bff73 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -7,7 +7,6 @@ apiservers.config.openshift.io: Category: "" FeatureGates: - KMSEncryption - - KMSEncryptionProvider - TLSAdherence FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" @@ -206,7 +205,8 @@ dnses.config.openshift.io: CRDName: dnses.config.openshift.io Capability: "" Category: "" - FeatureGates: [] + FeatureGates: + - AWSEuropeanSovereignCloudInstall FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" @@ -372,9 +372,6 @@ infrastructures.config.openshift.io: - AzureDualStackInstall - DualReplica - DyanmicServiceEndpointIBMCloud - - GCPClusterHostedDNSInstall - - HighlyAvailableArbiter - - HighlyAvailableArbiter+DualReplica - NutanixMultiSubnets - OnPremDNSRecords - VSphereHostVMGroupZonal diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index a30061c252..f386a81125 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -137,7 +137,7 @@ func (GenericAPIServerConfig) SwaggerDoc() map[string]string { } var map_GenericControllerConfig = map[string]string{ - "": "GenericControllerConfig provides information to configure a controller", + "": "GenericControllerConfig provides information to configure a controller\n\nCompatibility level 1: Stable within a 
major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "servingInfo": "servingInfo is the HTTP serving information for the controller's endpoints", "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need", "authentication": "authentication allows configuration of authentication for the endpoints", @@ -459,7 +459,7 @@ func (OIDCProvider) SwaggerDoc() map[string]string { var map_PrefixedClaimMapping = map[string]string{ "": "PrefixedClaimMapping configures a claim mapping that allows for an optional prefix.", - "prefix": "prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes.\n\nWhen omitted (\"\"), no prefix is applied to the cluster identity attribute.\n\nExample: if `prefix` is set to \"myoidc:\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", + "prefix": "prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes.\n\nWhen omitted or set to an empty string (\"\"), no prefix is applied to the cluster identity attribute. 
Must not be set to a non-empty value when expression is set.\n\nExample: if `prefix` is set to \"myoidc:\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", } func (PrefixedClaimMapping) SwaggerDoc() map[string]string { @@ -550,7 +550,7 @@ func (TokenUserValidationRule) SwaggerDoc() map[string]string { var map_UsernameClaimMapping = map[string]string{ "claim": "claim is an optional field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping. claim is required when the ExternalOIDCWithUpstreamParity feature gate is not enabled. When the ExternalOIDCWithUpstreamParity feature gate is enabled, claim must not be set when expression is set.\n\nclaim must not be an empty string (\"\") and must not exceed 256 characters.", "expression": "expression is an optional CEL expression used to derive the username from JWT claims.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'.\n\nexpression must be at least 1 character and must not exceed 1024 characters in length. expression must not be set when claim is set.", - "prefixPolicy": "prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field.\n\nAllowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string).\n\nWhen set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim.\n\nThe prefix field must be set when prefixPolicy is 'Prefix'.\n\nWhen set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim.\n\nWhen omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. 
Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'.\n\nAs an example, consider the following scenario:\n\n `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n - \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n - \"email\": the mapped value will be \"userA@myoidc.tld\"", + "prefixPolicy": "prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field.\n\nAllowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string).\n\nWhen set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. The prefix field must be set when prefixPolicy is 'Prefix'. Must not be set to 'Prefix' when expression is set. When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. 
Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'.\n\nAs an example, consider the following scenario:\n\n `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n - \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n - \"email\": the mapped value will be \"userA@myoidc.tld\"", "prefix": "prefix configures the prefix that should be prepended to the value of the JWT claim.\n\nprefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise.", } @@ -988,7 +988,7 @@ func (ConsoleStatus) SwaggerDoc() map[string]string { var map_AWSDNSSpec = map[string]string{ "": "AWSDNSSpec contains DNS configuration specific to the Amazon Web Services cloud provider.", - "privateZoneIAMRole": "privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed.", + "privateZoneIAMRole": "privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed.\n\nThe ARN must follow the format: arn::iam:::role/, where: is the AWS partition (aws, aws-cn, aws-us-gov, or aws-eusc), is a 12-digit numeric identifier for the AWS account, is the IAM role name.", } func (AWSDNSSpec) SwaggerDoc() map[string]string { @@ -1754,7 +1754,7 @@ var map_InfrastructureStatus = map[string]string{ "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. 
For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.", "apiServerURL": "apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.", "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.", - "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster.", + "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. 
The 'HighlyAvailableArbiter' mode indicates that the control plane will consist of 2 control-plane nodes that run conventional services and 1 smaller sized arbiter node that runs a bare minimum of services to maintain quorum.", "infrastructureTopology": "infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation NOTE: External topology mode is not applicable for this field.", "cpuPartitioning": "cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. Valid values are \"None\" and \"AllNodes\". When omitted, the default value is \"None\". The default value of \"None\" indicates that no nodes will be setup with CPU partitioning. The \"AllNodes\" value indicates that all nodes have been setup with CPU partitioning, and can then be further configured via the PerformanceProfile API.", } @@ -2329,24 +2329,76 @@ func (Storage) SwaggerDoc() map[string]string { return map_Storage } -var map_AWSKMSConfig = map[string]string{ - "": "AWSKMSConfig defines the KMS config specific to AWS KMS provider", - "keyARN": "keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption. The value must adhere to the format `arn:aws:kms:::key/`, where: - `` is the AWS region consisting of lowercase letters and hyphens followed by a number. - `` is a 12-digit numeric identifier for the AWS account. 
- `` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens.", - "region": "region specifies the AWS region where the KMS instance exists, and follows the format `--`, e.g.: `us-east-1`. Only lowercase letters and hyphens followed by numbers are allowed.", +var map_KMSConfig = map[string]string{ + "": "KMSConfig defines the configuration for the KMS instance that will be used with KMS encryption", + "type": "type defines the kind of platform for the KMS provider. Allowed values are Vault. When set to Vault, the plugin connects to a HashiCorp Vault server for key management.", + "vault": "vault defines the configuration for the Vault KMS plugin. The plugin connects to a Vault Enterprise server that is managed by the user outside the purview of the control plane. This field must be set when type is Vault, and must be unset otherwise.", } -func (AWSKMSConfig) SwaggerDoc() map[string]string { - return map_AWSKMSConfig +func (KMSConfig) SwaggerDoc() map[string]string { + return map_KMSConfig } -var map_KMSConfig = map[string]string{ - "": "KMSConfig defines the configuration for the KMS instance that will be used with KMSEncryptionProvider encryption", - "type": "type defines the kind of platform for the KMS provider. Available provider types are AWS only.", - "aws": "aws defines the key config for using an AWS KMS instance for the encryption. The AWS KMS instance is managed by the user outside the purview of the control plane.", +var map_VaultAppRoleAuthentication = map[string]string{ + "": "VaultAppRoleAuthentication defines the configuration for AppRole authentication with Vault.", + "secret": "secret references a secret in the openshift-config namespace containing the AppRole credentials used to authenticate with Vault. 
The secret must contain two keys: \"roleID\" for the AppRole Role ID and \"secretID\" for the AppRole Secret ID.\n\nThe namespace for the secret is openshift-config.", } -func (KMSConfig) SwaggerDoc() map[string]string { - return map_KMSConfig +func (VaultAppRoleAuthentication) SwaggerDoc() map[string]string { + return map_VaultAppRoleAuthentication +} + +var map_VaultAuthentication = map[string]string{ + "": "VaultAuthentication defines the authentication method used to authenticate with Vault.", + "type": "type defines the authentication method used to authenticate with Vault. Allowed values are AppRole. When set to AppRole, the plugin uses AppRole credentials to authenticate with Vault.", + "appRole": "appRole defines the configuration for AppRole authentication. This field must be set when type is AppRole, and must be unset otherwise.", +} + +func (VaultAuthentication) SwaggerDoc() map[string]string { + return map_VaultAuthentication +} + +var map_VaultConfigMapReference = map[string]string{ + "": "VaultConfigMapReference references a ConfigMap in the openshift-config namespace.", + "name": "name is the metadata.name of the referenced ConfigMap in the openshift-config namespace. The name must be a valid DNS subdomain name: it must contain no more than 253 characters, contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character.", +} + +func (VaultConfigMapReference) SwaggerDoc() map[string]string { + return map_VaultConfigMapReference +} + +var map_VaultKMSConfig = map[string]string{ + "": "VaultKMSConfig defines the KMS plugin configuration specific to Vault KMS", + "kmsPluginImage": "kmsPluginImage specifies the container image for the HashiCorp Vault KMS plugin.\n\nThe image must be a fully qualified OCI image pull spec with a SHA256 digest. The format is: host[:port][/namespace]/name@sha256: where the digest must be 64 characters long and consist only of lowercase hexadecimal characters, a-f and 0-9. 
The total length must be between 75 and 447 characters.\n\nShort names (e.g., \"vault-plugin\" or \"hashicorp/vault-plugin\") are not allowed. The registry hostname must be included and must contain at least one dot. Image tags (e.g., \":latest\", \":v1.0.0\") are not allowed.\n\nConsult the OpenShift documentation for compatible plugin versions with your cluster version, then obtain the image digest for that version from HashiCorp's container registry.\n\nFor disconnected environments, mirror the plugin image to an accessible registry and reference the mirrored location with its digest.", + "vaultAddress": "vaultAddress specifies the address of the HashiCorp Vault instance. The value must be a valid HTTPS URL containing only scheme, host, and optional port. Paths, user info, query parameters, and fragments are not allowed.\n\nFormat: https://hostname[:port] Example: https://vault.example.com:8200\n\nThe value must be between 1 and 512 characters.", + "vaultNamespace": "vaultNamespace specifies the Vault namespace where the Transit secrets engine is mounted. This is only applicable for Vault Enterprise installations. When this field is not set, no namespace is used.\n\nThe value must be between 1 and 4096 characters. The namespace cannot end with a forward slash, cannot contain spaces, and cannot be one of the reserved strings: root, sys, audit, auth, cubbyhole, or identity.", + "tls": "tls contains the TLS configuration for connecting to the Vault server. When this field is not set, system default TLS settings are used.", + "authentication": "authentication defines the authentication method used to authenticate with Vault.", + "transitMount": "transitMount specifies the mount path of the Vault Transit engine. The value must be between 1 and 1024 characters when specified.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a reasonable default. These defaults are subject to change over time. 
The current default is \"transit\".\n\nThe mount path cannot start or end with a forward slash, cannot contain spaces, and cannot contain consecutive forward slashes.", + "transitKey": "transitKey specifies the name of the encryption key in Vault's Transit engine. This key is used to encrypt and decrypt data.\n\nThe key name must be between 1 and 512 characters and cannot contain spaces or forward slashes.", +} + +func (VaultKMSConfig) SwaggerDoc() map[string]string { + return map_VaultKMSConfig +} + +var map_VaultSecretReference = map[string]string{ + "": "VaultSecretReference references a secret in the openshift-config namespace.", + "name": "name is the metadata.name of the referenced secret in the openshift-config namespace. The name must be a valid DNS subdomain name: it must contain no more than 253 characters, contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character.", +} + +func (VaultSecretReference) SwaggerDoc() map[string]string { + return map_VaultSecretReference +} + +var map_VaultTLSConfig = map[string]string{ + "": "VaultTLSConfig contains TLS configuration for connecting to Vault.", + "caBundle": "caBundle references a ConfigMap in the openshift-config namespace containing the CA certificate bundle used to verify the TLS connection to the Vault server. The ConfigMap must contain the CA bundle in the key \"ca-bundle.crt\". When this field is not set, the system's trusted CA certificates are used.\n\nThe namespace for the ConfigMap is openshift-config.\n\nExample ConfigMap:\n apiVersion: v1\n kind: ConfigMap\n metadata:\n name: vault-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", + "serverName": "serverName specifies the Server Name Indication (SNI) to use when connecting to Vault via TLS. This is useful when the Vault server's hostname doesn't match its TLS certificate. 
When this field is not set, the hostname from vaultAddress is used for SNI.\n\nThe value must be a valid DNS hostname: it must contain no more than 253 characters, contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character.", +} + +func (VaultTLSConfig) SwaggerDoc() map[string]string { + return map_VaultTLSConfig } var map_ClusterNetworkEntry = map[string]string{ diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go index 48ca1aed8a..083c2d6b55 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go @@ -126,6 +126,38 @@ type ClusterMonitoringSpec struct { // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. // +optional OpenShiftStateMetricsConfig OpenShiftStateMetricsConfig `json:"openShiftStateMetricsConfig,omitempty,omitzero"` + // telemeterClientConfig is an optional field that can be used to configure the Telemeter Client + // component that runs in the openshift-monitoring namespace. The Telemeter Client collects + // selected monitoring metrics and forwards them to Red Hat for telemetry purposes. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // When set, at least one field must be specified within telemeterClientConfig. + // +optional + TelemeterClientConfig TelemeterClientConfig `json:"telemeterClientConfig,omitempty,omitzero"` + // thanosQuerierConfig is an optional field that can be used to configure the Thanos Querier + // component that runs in the openshift-monitoring namespace. The Thanos Querier provides + // a global query view by aggregating and deduplicating metrics from multiple Prometheus instances. 
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default deploys the Thanos Querier on linux nodes with 5m CPU and 12Mi memory + // requests, and no custom tolerations or topology spread constraints. + // When set, at least one field must be specified within thanosQuerierConfig. + // +optional + ThanosQuerierConfig ThanosQuerierConfig `json:"thanosQuerierConfig,omitempty,omitzero"` + // nodeExporterConfig is an optional field that can be used to configure the node-exporter agent + // that runs as a DaemonSet in the openshift-monitoring namespace. The node-exporter agent collects + // hardware and OS-level metrics from every node in the cluster. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // +optional + NodeExporterConfig NodeExporterConfig `json:"nodeExporterConfig,omitempty,omitzero"` + // monitoringPluginConfig is an optional field that can be used to configure the monitoring plugin + // that runs as a dynamic plugin of the OpenShift web console. The monitoring plugin provides + // the monitoring UI in the OpenShift web console for visualizing metrics, alerts, and dashboards. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default deploys the monitoring-plugin as a single-replica Deployment + // on linux nodes with 10m CPU and 50Mi memory requests, and no custom tolerations + // or topology spread constraints. + // When set, at least one field must be specified within monitoringPluginConfig. 
+ // +optional + MonitoringPluginConfig MonitoringPluginConfig `json:"monitoringPluginConfig,omitempty,omitzero"` } // OpenShiftStateMetricsConfig provides configuration options for the openshift-state-metrics agent @@ -158,13 +190,13 @@ type OpenShiftStateMetricsConfig struct { // - name: memory // request: 32Mi // limit: null - // Maximum length for this list is 10. + // Maximum length for this list is 5. // Minimum length for this list is 1. // Each resource name must be unique within this list. // +optional // +listType=map // +listMapKey=name - // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MaxItems=5 // +kubebuilder:validation:MinItems=1 Resources []ContainerResource `json:"resources,omitempty"` // tolerations defines tolerations for the pods. @@ -201,6 +233,506 @@ type OpenShiftStateMetricsConfig struct { TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` } +// NodeExporterConfig provides configuration options for the node-exporter agent +// that runs as a DaemonSet in the `openshift-monitoring` namespace. The node-exporter agent collects +// hardware and OS-level metrics from every node in the cluster, including CPU, memory, disk, and +// network statistics. +// At least one field must be specified. +// +kubebuilder:validation:MinProperties=1 +type NodeExporterConfig struct { + // nodeSelector defines the nodes on which the Pods are scheduled. + // nodeSelector is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default value is `kubernetes.io/os: linux`. + // When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries. 
+ // +optional + // +kubebuilder:validation:MinProperties=1 + // +kubebuilder:validation:MaxProperties=10 + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the compute resource requests and limits for the node-exporter container. + // This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. + // When not specified, defaults are used by the platform. Requests cannot exceed limits. + // This field is optional. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. + // The current default values are: + // resources: + // - name: cpu + // request: 8m + // limit: null + // - name: memory + // request: 32Mi + // limit: null + // --- + // maxItems is set to 5 to stay within the Kubernetes CRD CEL validation cost budget. + // See the MaxItems comment near the ContainerResource type definition for details. + // Minimum length for this list is 1. + // Each resource name must be unique within this list. + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=5 + // +kubebuilder:validation:MinItems=1 + Resources []ContainerResource `json:"resources,omitempty"` + // tolerations defines tolerations for the pods. + // tolerations is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is to tolerate all taints (operator: Exists without any key), + // which is typical for DaemonSets that must run on every node. + // Maximum length for this list is 10. + // Minimum length for this list is 1. 
+ // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // collectors configures which node-exporter metric collectors are enabled. + // collectors is optional. + // Each collector can be individually enabled or disabled. Some collectors may have + // additional configuration options. + // + // When omitted, this means no opinion and the platform is left to choose a reasonable + // default, which is subject to change over time. + // +optional + Collectors NodeExporterCollectorConfig `json:"collectors,omitempty,omitzero"` + // maxProcs sets the target number of CPUs on which the node-exporter process will run. + // maxProcs is optional. + // Use this setting to override the default value, which is set either to 4 or to the number + // of CPUs on the host, whichever is smaller. + // The default value is computed at runtime and set via the GOMAXPROCS environment variable before + // node-exporter is launched. + // If a kernel deadlock occurs or if performance degrades when reading from sysfs concurrently, + // you can change this value to 1, which limits node-exporter to running on one CPU. + // For nodes with a high CPU count, setting the limit to a low number saves resources by preventing + // Go routines from being scheduled to run on all CPUs. However, I/O performance degrades if the + // maxProcs value is set too low and there are many metrics to collect. + // The minimum value is 1 and the maximum value is 1024. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is min(4, number of host CPUs). 
+ // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=1024 + MaxProcs int32 `json:"maxProcs,omitempty"` + // ignoredNetworkDevices is a list of regular expression patterns that match network devices + // to be excluded from the relevant collector configuration such as netdev, netclass, and ethtool. + // ignoredNetworkDevices is optional. + // + // When omitted, the Cluster Monitoring Operator uses a predefined list of devices to be excluded + // to minimize the impact on memory usage. + // When set as an empty list, no devices are excluded. + // If you modify this setting, monitor the prometheus-k8s deployment closely for excessive memory usage. + // Maximum length for this list is 50. + // Each entry must be at least 1 character and at most 1024 characters long. + // +kubebuilder:validation:MaxItems=50 + // +kubebuilder:validation:MinItems=0 + // +listType=set + // +optional + IgnoredNetworkDevices *[]NodeExporterIgnoredNetworkDevice `json:"ignoredNetworkDevices,omitempty"` +} + +// NodeExporterIgnoredNetworkDevice is a string that is interpreted as a Go regular expression +// pattern by the controller to match network device names to exclude from node-exporter +// metric collection for collectors such as netdev, netclass, and ethtool. +// Invalid regular expressions will cause a controller-level error at runtime. +// Must be at least 1 character and at most 1024 characters. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=1024 +type NodeExporterIgnoredNetworkDevice string + +// NodeExporterCollectorCollectionPolicy declares whether a node-exporter collector should collect metrics. +// Valid values are "Collect" and "DoNotCollect". +// +kubebuilder:validation:Enum=Collect;DoNotCollect +// +enum +type NodeExporterCollectorCollectionPolicy string + +const ( + // NodeExporterCollectorCollectionPolicyCollect means the collector is active and will produce metrics. 
+ NodeExporterCollectorCollectionPolicyCollect NodeExporterCollectorCollectionPolicy = "Collect" + // NodeExporterCollectorCollectionPolicyDoNotCollect means the collector is inactive and will not produce metrics. + NodeExporterCollectorCollectionPolicyDoNotCollect NodeExporterCollectorCollectionPolicy = "DoNotCollect" +) + +// NodeExporterNetclassStatsGatherer identifies how the netclass collector gathers device statistics +// (for example via sysfs or netlink, as implemented in node_exporter). +// Valid values are "Sysfs" and "Netlink". +// +kubebuilder:validation:Enum=Sysfs;Netlink +// +enum +type NodeExporterNetclassStatsGatherer string + +const ( + // NodeExporterNetclassStatsGathererSysfs uses the sysfs-based implementation. + NodeExporterNetclassStatsGathererSysfs NodeExporterNetclassStatsGatherer = "Sysfs" + // NodeExporterNetclassStatsGathererNetlink uses the netlink-based implementation. + NodeExporterNetclassStatsGathererNetlink NodeExporterNetclassStatsGatherer = "Netlink" +) + +// NodeExporterCollectorConfig defines settings for individual collectors +// of the node-exporter agent. Each collector can be individually set to collect or not collect metrics. +// At least one collector must be specified. +// +kubebuilder:validation:MinProperties=1 +type NodeExporterCollectorConfig struct { + // cpuFreq configures the cpufreq collector, which collects CPU frequency statistics. + // cpuFreq is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Consider enabling when you need to observe CPU frequency scaling; expect higher CPU usage on + // many-core nodes when collectionPolicy is Collect. + // +optional + CpuFreq NodeExporterCollectorCpufreqConfig `json:"cpuFreq,omitempty,omitzero"` + // tcpStat configures the tcpstat collector, which collects TCP connection statistics. + // tcpStat is optional. 
+ // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Enable when debugging TCP connection behavior or capacity at the node level. + // +optional + TcpStat NodeExporterCollectorTcpStatConfig `json:"tcpStat,omitempty,omitzero"` + // ethtool configures the ethtool collector, which collects ethernet device statistics. + // ethtool is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Enable when you need NIC driver-level ethtool metrics beyond generic netdev counters. + // +optional + Ethtool NodeExporterCollectorEthtoolConfig `json:"ethtool,omitempty,omitzero"` + // netDev configures the netdev collector, which collects network device statistics. + // netDev is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is enabled. + // Turn off if you must reduce per-interface metric cardinality on hosts with many virtual interfaces. + // +optional + NetDev NodeExporterCollectorNetDevConfig `json:"netDev,omitempty,omitzero"` + // netClass configures the netclass collector, which collects information about network devices. + // netClass is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is enabled with netlink mode active. + // Use statsGatherer when sysfs vs netlink implementation matters or when matching node_exporter tuning. + // +optional + NetClass NodeExporterCollectorNetClassConfig `json:"netClass,omitempty,omitzero"` + // buddyInfo configures the buddyinfo collector, which collects statistics about memory + // fragmentation from the node_buddyinfo_blocks metric. 
This metric collects data from /proc/buddyinfo. + // buddyInfo is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Enable when investigating kernel memory fragmentation; typically for advanced troubleshooting only. + // +optional + BuddyInfo NodeExporterCollectorBuddyInfoConfig `json:"buddyInfo,omitempty,omitzero"` + // mountStats configures the mountstats collector, which collects statistics about NFS volume + // I/O activities. + // mountStats is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Enabling this collector may produce metrics with high cardinality. If you enable this + // collector, closely monitor the prometheus-k8s deployment for excessive memory usage. + // Enable when you care about per-mount NFS client statistics. + // +optional + MountStats NodeExporterCollectorMountStatsConfig `json:"mountStats,omitempty,omitzero"` + // ksmd configures the ksmd collector, which collects statistics from the kernel same-page + // merger daemon. + // ksmd is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Enable on nodes where KSM is in use and you want visibility into merging activity. + // +optional + Ksmd NodeExporterCollectorKSMDConfig `json:"ksmd,omitempty,omitzero"` + // processes configures the processes collector, which collects statistics from processes and + // threads running in the system. + // processes is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. 
+ // Enable for process/thread-level insight; can be expensive on busy nodes. + // +optional + Processes NodeExporterCollectorProcessesConfig `json:"processes,omitempty,omitzero"` + // systemd configures the systemd collector, which collects statistics on the systemd daemon + // and its managed services. + // systemd is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Enabling this collector with a long list of selected units may produce metrics with high + // cardinality. If you enable this collector, closely monitor the prometheus-k8s deployment + // for excessive memory usage. + // Enable when you need metrics for specific units; scope units carefully. + // +optional + Systemd NodeExporterCollectorSystemdConfig `json:"systemd,omitempty,omitzero"` +} + +// NodeExporterCollectorCpufreqConfig provides configuration for the cpufreq collector +// of the node-exporter agent. The cpufreq collector collects CPU frequency statistics. +// It is disabled by default. +type NodeExporterCollectorCpufreqConfig struct { + // collectionPolicy declares whether the cpufreq collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the cpufreq collector is active and CPU frequency statistics are collected. + // When set to "DoNotCollect", the cpufreq collector is inactive. + // +required + CollectionPolicy NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorTcpStatConfig provides configuration for the tcpstat collector +// of the node-exporter agent. The tcpstat collector collects TCP connection statistics. +// It is disabled by default. +type NodeExporterCollectorTcpStatConfig struct { + // collectionPolicy declares whether the tcpstat collector collects metrics. + // This field is required. 
+ // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the tcpstat collector is active and TCP connection statistics are collected. + // When set to "DoNotCollect", the tcpstat collector is inactive. + // +required + CollectionPolicy NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorEthtoolConfig provides configuration for the ethtool collector +// of the node-exporter agent. The ethtool collector collects ethernet device statistics. +// It is disabled by default. +type NodeExporterCollectorEthtoolConfig struct { + // collectionPolicy declares whether the ethtool collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the ethtool collector is active and ethernet device statistics are collected. + // When set to "DoNotCollect", the ethtool collector is inactive. + // +required + CollectionPolicy NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorNetDevConfig provides configuration for the netdev collector +// of the node-exporter agent. The netdev collector collects network device statistics +// such as bytes, packets, errors, and drops per device. +// It is enabled by default. +type NodeExporterCollectorNetDevConfig struct { + // collectionPolicy declares whether the netdev collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the netdev collector is active and network device statistics are collected. + // When set to "DoNotCollect", the netdev collector is inactive and the corresponding metrics become unavailable. + // +required + CollectionPolicy NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorNetClassConfig provides configuration for the netclass collector +// of the node-exporter agent. 
The netclass collector collects information about network devices +// such as network speed, MTU, and carrier status. +// It is enabled by default. +// When collectionPolicy is DoNotCollect, the collect field must not be set. +// +kubebuilder:validation:XValidation:rule="has(self.collectionPolicy) && self.collectionPolicy == 'Collect' ? true : !has(self.collect)",message="collect is forbidden when collectionPolicy is not Collect" +// +union +type NodeExporterCollectorNetClassConfig struct { + // collectionPolicy declares whether the netclass collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the netclass collector is active and network class information is collected. + // When set to "DoNotCollect", the netclass collector is inactive and the corresponding metrics become unavailable. + // When set to "DoNotCollect", the collect field must not be set. + // +unionDiscriminator + // +required + CollectionPolicy NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` + // collect contains configuration options that apply only when the netclass collector is actively collecting metrics + // (i.e. when collectionPolicy is Collect). + // collect is optional and may be omitted even when collectionPolicy is Collect. + // collect may only be set when collectionPolicy is Collect. + // When set, at least one field must be specified within collect. + // +unionMember + // +optional + Collect NodeExporterCollectorNetClassCollectConfig `json:"collect,omitzero,omitempty"` +} + +// NodeExporterCollectorNetClassCollectConfig holds configuration options for the netclass collector +// when it is actively collecting metrics. At least one field must be specified. +// +kubebuilder:validation:MinProperties=1 +type NodeExporterCollectorNetClassCollectConfig struct { + // statsGatherer selects which implementation the netclass collector uses to gather statistics (sysfs or netlink). 
+ // statsGatherer is optional. + // Valid values are "Sysfs" and "Netlink". + // When set to "Netlink", the netlink implementation is used; when set to "Sysfs", the sysfs implementation is used. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is Netlink. + // +optional + StatsGatherer NodeExporterNetclassStatsGatherer `json:"statsGatherer,omitempty"` +} + +// NodeExporterCollectorBuddyInfoConfig provides configuration for the buddyinfo collector +// of the node-exporter agent. The buddyinfo collector collects statistics about memory fragmentation +// from the node_buddyinfo_blocks metric using data from /proc/buddyinfo. +// It is disabled by default. +type NodeExporterCollectorBuddyInfoConfig struct { + // collectionPolicy declares whether the buddyinfo collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the buddyinfo collector is active and memory fragmentation statistics are collected. + // When set to "DoNotCollect", the buddyinfo collector is inactive. + // +required + CollectionPolicy NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorMountStatsConfig provides configuration for the mountstats collector +// of the node-exporter agent. The mountstats collector collects statistics about NFS volume I/O activities. +// It is disabled by default. +// Enabling this collector may produce metrics with high cardinality. If you enable this +// collector, closely monitor the prometheus-k8s deployment for excessive memory usage. +type NodeExporterCollectorMountStatsConfig struct { + // collectionPolicy declares whether the mountstats collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". 
+ // When set to "Collect", the mountstats collector is active and NFS volume I/O statistics are collected. + // When set to "DoNotCollect", the mountstats collector is inactive. + // +required + CollectionPolicy NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorKSMDConfig provides configuration for the ksmd collector +// of the node-exporter agent. The ksmd collector collects statistics from the kernel +// same-page merger daemon. +// It is disabled by default. +type NodeExporterCollectorKSMDConfig struct { + // collectionPolicy declares whether the ksmd collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the ksmd collector is active and kernel same-page merger statistics are collected. + // When set to "DoNotCollect", the ksmd collector is inactive. + // +required + CollectionPolicy NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorProcessesConfig provides configuration for the processes collector +// of the node-exporter agent. The processes collector collects statistics from processes and threads +// running in the system. +// It is disabled by default. +type NodeExporterCollectorProcessesConfig struct { + // collectionPolicy declares whether the processes collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the processes collector is active and process/thread statistics are collected. + // When set to "DoNotCollect", the processes collector is inactive. + // +required + CollectionPolicy NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorSystemdConfig provides configuration for the systemd collector +// of the node-exporter agent. The systemd collector collects statistics on the systemd daemon +// and its managed services. 
+// It is disabled by default. +// Enabling this collector with a long list of selected units may produce metrics with high +// cardinality. If you enable this collector, closely monitor the prometheus-k8s deployment +// for excessive memory usage. +// When collectionPolicy is DoNotCollect, the collect field must not be set. +// +kubebuilder:validation:XValidation:rule="has(self.collectionPolicy) && self.collectionPolicy == 'Collect' ? true : !has(self.collect)",message="collect is forbidden when collectionPolicy is not Collect" +// +union +type NodeExporterCollectorSystemdConfig struct { + // collectionPolicy declares whether the systemd collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the systemd collector is active and systemd unit statistics are collected. + // When set to "DoNotCollect", the systemd collector is inactive and the collect field must not be set. + // +unionDiscriminator + // +required + CollectionPolicy NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` + // collect contains configuration options that apply only when the systemd collector is actively collecting metrics + // (i.e. when collectionPolicy is Collect). + // collect is optional and may be omitted even when collectionPolicy is Collect. + // collect may only be set when collectionPolicy is Collect. + // When set, at least one field must be specified within collect. + // +unionMember + // +optional + Collect NodeExporterCollectorSystemdCollectConfig `json:"collect,omitzero,omitempty"` +} + +// NodeExporterCollectorSystemdCollectConfig holds configuration options for the systemd collector +// when it is actively collecting metrics. At least one field must be specified. 
+// +kubebuilder:validation:MinProperties=1 +type NodeExporterCollectorSystemdCollectConfig struct { + // units is a list of regular expression patterns that match systemd units to be included + // by the systemd collector. + // units is optional. + // By default, the list is empty, so the collector exposes no metrics for systemd units. + // Each entry is a regular expression pattern and must be at least 1 character and at most 1024 characters. + // Maximum length for this list is 50. + // Minimum length for this list is 1. + // Entries in this list must be unique. + // +kubebuilder:validation:MaxItems=50 + // +kubebuilder:validation:MinItems=1 + // +listType=set + // +optional + Units []NodeExporterSystemdUnit `json:"units,omitempty"` +} + +// NodeExporterSystemdUnit is a string that is interpreted as a Go regular expression +// pattern by the controller to match systemd unit names. +// Invalid regular expressions will cause a controller-level error at runtime. +// Must be at least 1 character and at most 1024 characters. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=1024 +type NodeExporterSystemdUnit string + +// MonitoringPluginConfig provides configuration options for the monitoring plugin +// that runs as a dynamic plugin of the OpenShift web console. +// The monitoring plugin provides the monitoring UI in the OpenShift web console +// for visualizing metrics, alerts, and dashboards. +// At least one field must be specified; an empty monitoringPluginConfig object is not allowed. +// +kubebuilder:validation:MinProperties=1 +type MonitoringPluginConfig struct { + // nodeSelector defines the nodes on which the Pods are scheduled. + // nodeSelector is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default value is `kubernetes.io/os: linux`. 
+ // When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries. + // +optional + // +kubebuilder:validation:MinProperties=1 + // +kubebuilder:validation:MaxProperties=10 + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the compute resource requests and limits for the monitoring-plugin container. + // This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. + // When not specified, defaults are used by the platform. Requests cannot exceed limits. + // This field is optional. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. + // The current default values are: + // resources: + // - name: cpu + // request: 10m + // - name: memory + // request: 50Mi + // + // When specified, resources must contain at least 1 entry and must not exceed 5 entries. + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=5 + // +kubebuilder:validation:MinItems=1 + Resources []ContainerResource `json:"resources,omitempty"` + // tolerations defines the tolerations required for the monitoring-plugin Pods. + // This field is optional. + // + // When omitted, the monitoring-plugin Pods will not have any tolerations, which + // means they will only be scheduled on nodes with no taints. + // When specified, tolerations must contain at least 1 entry and must not contain more than 10 entries. + // +optional + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=10 + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // topologySpreadConstraints defines rules for how monitoring-plugin Pods should be distributed + // across topology domains such as zones, nodes, or other user-defined labels. + // topologySpreadConstraints is optional. 
+ // This helps improve high availability and resource efficiency by avoiding placing + // too many replicas in the same failure domain. + // + // When omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. + // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. + // Default is empty list. + // When specified, this list must contain at least 1 entry and must not exceed 10 entries. + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + // +optional + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + // UserDefinedMonitoring config for user-defined projects. type UserDefinedMonitoring struct { // mode defines the different configurations of UserDefinedMonitoring @@ -288,13 +820,13 @@ type AlertmanagerCustomConfig struct { // - name: memory // request: 40Mi // limit: null - // Maximum length for this list is 10. + // Maximum length for this list is 5. // Minimum length for this list is 1. // Each resource name must be unique within this list. // +optional // +listType=map // +listMapKey=name - // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MaxItems=5 // +kubebuilder:validation:MinItems=1 Resources []ContainerResource `json:"resources,omitempty"` // secrets defines a list of secrets that need to be mounted into the Alertmanager. @@ -392,6 +924,13 @@ const ( LogLevelDebug LogLevel = "Debug" ) +// MaxItems on []ContainerResource fields is kept at 5 to stay within the +// Kubernetes CRD CEL validation cost budget (StaticEstimatedCRDCostLimit). +// The quantity() CEL function has a high fixed estimated cost per invocation, +// and the limit-vs-request comparison rule is costed per maxItems per location. 
+// With multiple structs in ClusterMonitoringSpec embedding []ContainerResource, +// maxItems > 5 causes the total estimated rule cost to exceed the budget. + // ContainerResource defines a single resource requirement for a container. // +kubebuilder:validation:XValidation:rule="has(self.request) || has(self.limit)",message="at least one of request or limit must be set" // +kubebuilder:validation:XValidation:rule="!(has(self.request) && has(self.limit)) || quantity(self.limit).compareTo(quantity(self.request)) >= 0",message="limit must be greater than or equal to request" @@ -408,6 +947,7 @@ type ContainerResource struct { // request is the minimum amount of the resource required (e.g. "2Mi", "1Gi"). // This field is optional. // When limit is specified, request cannot be greater than limit. + // The value must be greater than 0 when specified. // +optional // +kubebuilder:validation:XIntOrString // +kubebuilder:validation:MaxLength=20 @@ -491,13 +1031,13 @@ type MetricsServerConfig struct { // - name: memory // request: 40Mi // limit: null - // Maximum length for this list is 10. + // Maximum length for this list is 5. // Minimum length for this list is 1. // Each resource name must be unique within this list. // +optional // +listType=map // +listMapKey=name - // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MaxItems=5 // +kubebuilder:validation:MinItems=1 Resources []ContainerResource `json:"resources,omitempty"` // topologySpreadConstraints defines rules for how Metrics Server Pods should be distributed @@ -562,13 +1102,13 @@ type PrometheusOperatorConfig struct { // - name: memory // request: 40Mi // limit: null - // Maximum length for this list is 10. + // Maximum length for this list is 5. // Minimum length for this list is 1. // Each resource name must be unique within this list. 
// +optional // +listType=map // +listMapKey=name - // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MaxItems=5 // +kubebuilder:validation:MinItems=1 Resources []ContainerResource `json:"resources,omitempty"` // tolerations defines tolerations for the pods. @@ -626,13 +1166,13 @@ type PrometheusOperatorAdmissionWebhookConfig struct { // - name: memory // request: 30Mi // limit: null - // Maximum length for this list is 10. + // Maximum length for this list is 5. // Minimum length for this list is 1. // Each resource name must be unique within this list. // +optional // +listType=map // +listMapKey=name - // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MaxItems=5 // +kubebuilder:validation:MinItems=1 Resources []ContainerResource `json:"resources,omitempty"` // topologySpreadConstraints defines rules for how admission webhook Pods should be distributed @@ -761,18 +1301,24 @@ type PrometheusConfig struct { // resources defines the compute resource requests and limits for the Prometheus container. // This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. // When not specified, defaults are used by the platform. Requests cannot exceed limits. - // Each entry must have a unique resource name. - // Minimum of 1 and maximum of 10 resource entries can be specified. + // This field is optional. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. // The current default values are: // resources: // - name: cpu // request: 4m + // limit: null // - name: memory // request: 40Mi + // limit: null + // Maximum length for this list is 5. + // Minimum length for this list is 1. + // Each resource name must be unique within this list. 
// +optional // +listType=map // +listMapKey=name - // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MaxItems=5 // +kubebuilder:validation:MinItems=1 Resources []ContainerResource `json:"resources,omitempty"` // retention configures how long Prometheus retains metrics data and how much storage it can use. @@ -1734,6 +2280,153 @@ const ( CollectionProfileMinimal CollectionProfile = "Minimal" ) +// TelemeterClientConfig provides configuration options for the Telemeter Client component +// that runs in the `openshift-monitoring` namespace. The Telemeter Client collects selected +// monitoring metrics and forwards them to Red Hat for telemetry purposes. +// At least one field must be specified. +// +kubebuilder:validation:MinProperties=1 +type TelemeterClientConfig struct { + // nodeSelector defines the nodes on which the Pods are scheduled. + // nodeSelector is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default value is `kubernetes.io/os: linux`. + // When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries. + // +optional + // +kubebuilder:validation:MinProperties=1 + // +kubebuilder:validation:MaxProperties=10 + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the compute resource requests and limits for the Telemeter Client container. + // This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. + // When not specified, defaults are used by the platform. Requests cannot exceed limits. + // This field is optional. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. 
+ // The current default values are: + // resources: + // - name: cpu + // request: 1m + // limit: null + // - name: memory + // request: 40Mi + // limit: null + // Maximum length for this list is 5. + // Minimum length for this list is 1. + // Each resource name must be unique within this list. + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=5 + // +kubebuilder:validation:MinItems=1 + Resources []ContainerResource `json:"resources,omitempty"` + // tolerations defines tolerations for the pods. + // tolerations is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // Defaults are empty/unset. + // Maximum length for this list is 10. + // Minimum length for this list is 1. + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // topologySpreadConstraints defines rules for how Telemeter Client Pods should be distributed + // across topology domains such as zones, nodes, or other user-defined labels. + // topologySpreadConstraints is optional. + // This helps improve high availability and resource efficiency by avoiding placing + // too many replicas in the same failure domain. + // + // When omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. + // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. + // Default is empty list. + // Maximum length for this list is 10. + // Minimum length for this list is 1. + // Entries must have unique topologyKey and whenUnsatisfiable pairs. 
+ // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + // +optional + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// ThanosQuerierConfig provides configuration options for the Thanos Querier component +// that runs in the `openshift-monitoring` namespace. +// At least one field must be specified; an empty thanosQuerierConfig object is not allowed. +// +kubebuilder:validation:MinProperties=1 +type ThanosQuerierConfig struct { + // nodeSelector defines the nodes on which the Pods are scheduled. + // nodeSelector is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default value is `kubernetes.io/os: linux`. + // When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries. + // +optional + // +kubebuilder:validation:MinProperties=1 + // +kubebuilder:validation:MaxProperties=10 + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the compute resource requests and limits for the Thanos Querier container. + // resources is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // Requests cannot exceed limits. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. + // The current default values are: + // resources: + // - name: cpu + // request: 5m + // - name: memory + // request: 12Mi + // Maximum length for this list is 5. + // Minimum length for this list is 1. + // Each resource name must be unique within this list. 
+ // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=5 + // +kubebuilder:validation:MinItems=1 + Resources []ContainerResource `json:"resources,omitempty"` + // tolerations defines tolerations for the pods. + // tolerations is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // Defaults are empty/unset. + // Maximum length for this list is 10. + // Minimum length for this list is 1. + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +optional + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // topologySpreadConstraints defines rules for how Thanos Querier Pods should be distributed + // across topology domains such as zones, nodes, or other user-defined labels. + // topologySpreadConstraints is optional. + // This helps improve high availability and resource efficiency by avoiding placing + // too many replicas in the same failure domain. + // + // When omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. + // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. + // Defaults are empty/unset. + // Maximum length for this list is 10. + // Minimum length for this list is 1. + // Entries must have unique topologyKey and whenUnsatisfiable pairs. + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + // +optional + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + // AuditProfile defines the audit log level for the Metrics Server. 
// +kubebuilder:validation:Enum=None;Metadata;Request;RequestResponse type AuditProfile string diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go index ad6afabff9..f0f0a86de6 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go @@ -447,6 +447,10 @@ func (in *ClusterMonitoringSpec) DeepCopyInto(out *ClusterMonitoringSpec) { in.PrometheusOperatorConfig.DeepCopyInto(&out.PrometheusOperatorConfig) in.PrometheusOperatorAdmissionWebhookConfig.DeepCopyInto(&out.PrometheusOperatorAdmissionWebhookConfig) in.OpenShiftStateMetricsConfig.DeepCopyInto(&out.OpenShiftStateMetricsConfig) + in.TelemeterClientConfig.DeepCopyInto(&out.TelemeterClientConfig) + in.ThanosQuerierConfig.DeepCopyInto(&out.ThanosQuerierConfig) + in.NodeExporterConfig.DeepCopyInto(&out.NodeExporterConfig) + in.MonitoringPluginConfig.DeepCopyInto(&out.MonitoringPluginConfig) return } @@ -873,6 +877,322 @@ func (in *MetricsServerConfig) DeepCopy() *MetricsServerConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MonitoringPluginConfig) DeepCopyInto(out *MonitoringPluginConfig) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ContainerResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringPluginConfig. +func (in *MonitoringPluginConfig) DeepCopy() *MonitoringPluginConfig { + if in == nil { + return nil + } + out := new(MonitoringPluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterCollectorBuddyInfoConfig) DeepCopyInto(out *NodeExporterCollectorBuddyInfoConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorBuddyInfoConfig. +func (in *NodeExporterCollectorBuddyInfoConfig) DeepCopy() *NodeExporterCollectorBuddyInfoConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorBuddyInfoConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeExporterCollectorConfig) DeepCopyInto(out *NodeExporterCollectorConfig) { + *out = *in + out.CpuFreq = in.CpuFreq + out.TcpStat = in.TcpStat + out.Ethtool = in.Ethtool + out.NetDev = in.NetDev + out.NetClass = in.NetClass + out.BuddyInfo = in.BuddyInfo + out.MountStats = in.MountStats + out.Ksmd = in.Ksmd + out.Processes = in.Processes + in.Systemd.DeepCopyInto(&out.Systemd) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorConfig. +func (in *NodeExporterCollectorConfig) DeepCopy() *NodeExporterCollectorConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterCollectorCpufreqConfig) DeepCopyInto(out *NodeExporterCollectorCpufreqConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorCpufreqConfig. +func (in *NodeExporterCollectorCpufreqConfig) DeepCopy() *NodeExporterCollectorCpufreqConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorCpufreqConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterCollectorEthtoolConfig) DeepCopyInto(out *NodeExporterCollectorEthtoolConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorEthtoolConfig. 
+func (in *NodeExporterCollectorEthtoolConfig) DeepCopy() *NodeExporterCollectorEthtoolConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorEthtoolConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterCollectorKSMDConfig) DeepCopyInto(out *NodeExporterCollectorKSMDConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorKSMDConfig. +func (in *NodeExporterCollectorKSMDConfig) DeepCopy() *NodeExporterCollectorKSMDConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorKSMDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterCollectorMountStatsConfig) DeepCopyInto(out *NodeExporterCollectorMountStatsConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorMountStatsConfig. +func (in *NodeExporterCollectorMountStatsConfig) DeepCopy() *NodeExporterCollectorMountStatsConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorMountStatsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterCollectorNetClassCollectConfig) DeepCopyInto(out *NodeExporterCollectorNetClassCollectConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorNetClassCollectConfig. 
+func (in *NodeExporterCollectorNetClassCollectConfig) DeepCopy() *NodeExporterCollectorNetClassCollectConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorNetClassCollectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterCollectorNetClassConfig) DeepCopyInto(out *NodeExporterCollectorNetClassConfig) { + *out = *in + out.Collect = in.Collect + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorNetClassConfig. +func (in *NodeExporterCollectorNetClassConfig) DeepCopy() *NodeExporterCollectorNetClassConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorNetClassConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterCollectorNetDevConfig) DeepCopyInto(out *NodeExporterCollectorNetDevConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorNetDevConfig. +func (in *NodeExporterCollectorNetDevConfig) DeepCopy() *NodeExporterCollectorNetDevConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorNetDevConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterCollectorProcessesConfig) DeepCopyInto(out *NodeExporterCollectorProcessesConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorProcessesConfig. 
+func (in *NodeExporterCollectorProcessesConfig) DeepCopy() *NodeExporterCollectorProcessesConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorProcessesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterCollectorSystemdCollectConfig) DeepCopyInto(out *NodeExporterCollectorSystemdCollectConfig) { + *out = *in + if in.Units != nil { + in, out := &in.Units, &out.Units + *out = make([]NodeExporterSystemdUnit, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorSystemdCollectConfig. +func (in *NodeExporterCollectorSystemdCollectConfig) DeepCopy() *NodeExporterCollectorSystemdCollectConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorSystemdCollectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterCollectorSystemdConfig) DeepCopyInto(out *NodeExporterCollectorSystemdConfig) { + *out = *in + in.Collect.DeepCopyInto(&out.Collect) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorSystemdConfig. +func (in *NodeExporterCollectorSystemdConfig) DeepCopy() *NodeExporterCollectorSystemdConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorSystemdConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterCollectorTcpStatConfig) DeepCopyInto(out *NodeExporterCollectorTcpStatConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterCollectorTcpStatConfig. 
+func (in *NodeExporterCollectorTcpStatConfig) DeepCopy() *NodeExporterCollectorTcpStatConfig { + if in == nil { + return nil + } + out := new(NodeExporterCollectorTcpStatConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeExporterConfig) DeepCopyInto(out *NodeExporterConfig) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ContainerResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Collectors.DeepCopyInto(&out.Collectors) + if in.IgnoredNetworkDevices != nil { + in, out := &in.IgnoredNetworkDevices, &out.IgnoredNetworkDevices + *out = new([]NodeExporterIgnoredNetworkDevice) + if **in != nil { + in, out := *in, *out + *out = make([]NodeExporterIgnoredNetworkDevice, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeExporterConfig. +func (in *NodeExporterConfig) DeepCopy() *NodeExporterConfig { + if in == nil { + return nil + } + out := new(NodeExporterConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OAuth2) DeepCopyInto(out *OAuth2) { *out = *in @@ -1584,6 +1904,94 @@ func (in *TLSConfig) DeepCopy() *TLSConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TelemeterClientConfig) DeepCopyInto(out *TelemeterClientConfig) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ContainerResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemeterClientConfig. +func (in *TelemeterClientConfig) DeepCopy() *TelemeterClientConfig { + if in == nil { + return nil + } + out := new(TelemeterClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ThanosQuerierConfig) DeepCopyInto(out *ThanosQuerierConfig) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ContainerResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosQuerierConfig. +func (in *ThanosQuerierConfig) DeepCopy() *ThanosQuerierConfig { + if in == nil { + return nil + } + out := new(ThanosQuerierConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UppercaseActionConfig) DeepCopyInto(out *UppercaseActionConfig) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go index b79cbbf774..45e803f588 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -109,7 +109,7 @@ var map_AlertmanagerCustomConfig = map[string]string{ "": "AlertmanagerCustomConfig represents the configuration for a custom Alertmanager deployment. 
alertmanagerCustomConfig provides configuration options for the default Alertmanager instance that runs in the `openshift-monitoring` namespace. Use this configuration to control whether the default Alertmanager is deployed, how it logs, and how its pods are scheduled.", "logLevel": "logLevel defines the verbosity of logs emitted by Alertmanager. This field allows users to control the amount and severity of logs generated, which can be useful for debugging issues or reducing noise in production environments. Allowed values are Error, Warn, Info, and Debug. When set to Error, only errors will be logged. When set to Warn, both warnings and errors will be logged. When set to Info, general information, warnings, and errors will all be logged. When set to Debug, detailed debugging information will be logged. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `Info`.", "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`.", - "resources": "resources defines the compute resource requests and limits for the Alertmanager container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1. 
Each resource name must be unique within this list.", + "resources": "resources defines the compute resource requests and limits for the Alertmanager container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 5. Minimum length for this list is 1. Each resource name must be unique within this list.", "secrets": "secrets defines a list of secrets that need to be mounted into the Alertmanager. The secrets must reside within the same namespace as the Alertmanager object. They will be added as volumes named secret- and mounted at /etc/alertmanager/secrets/ within the 'alertmanager' container of the Alertmanager Pods.\n\nThese secrets can be used to authenticate Alertmanager with endpoint receivers. For example, you can use secrets to: - Provide certificates for TLS authentication with receivers that require private CA certificates - Store credentials for Basic HTTP authentication with receivers that require password-based auth - Store any other authentication credentials needed by your alert receivers\n\nThis field is optional. Maximum length for this list is 10. Minimum length for this list is 1. Entries in this list must be unique.", "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. 
Minimum length for this list is 1.", "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Alertmanager Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. Entries must have unique topologyKey and whenUnsatisfiable pairs.", @@ -179,6 +179,10 @@ var map_ClusterMonitoringSpec = map[string]string{ "prometheusOperatorConfig": "prometheusOperatorConfig is an optional field that can be used to configure the Prometheus Operator component. Specifically, it can configure how the Prometheus Operator instance is deployed, pod scheduling, and resource allocation. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", "prometheusOperatorAdmissionWebhookConfig": "prometheusOperatorAdmissionWebhookConfig is an optional field that can be used to configure the admission webhook component of Prometheus Operator that runs in the openshift-monitoring namespace. The admission webhook validates PrometheusRule and AlertmanagerConfig objects to ensure they are semantically valid, mutates PrometheusRule annotations, and converts AlertmanagerConfig objects between API versions. 
When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", "openShiftStateMetricsConfig": "openShiftStateMetricsConfig is an optional field that can be used to configure the openshift-state-metrics agent that runs in the openshift-monitoring namespace. The openshift-state-metrics agent generates metrics about the state of OpenShift-specific Kubernetes objects, such as routes, builds, and deployments. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", + "telemeterClientConfig": "telemeterClientConfig is an optional field that can be used to configure the Telemeter Client component that runs in the openshift-monitoring namespace. The Telemeter Client collects selected monitoring metrics and forwards them to Red Hat for telemetry purposes. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. When set, at least one field must be specified within telemeterClientConfig.", + "thanosQuerierConfig": "thanosQuerierConfig is an optional field that can be used to configure the Thanos Querier component that runs in the openshift-monitoring namespace. The Thanos Querier provides a global query view by aggregating and deduplicating metrics from multiple Prometheus instances. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default deploys the Thanos Querier on linux nodes with 5m CPU and 12Mi memory requests, and no custom tolerations or topology spread constraints. When set, at least one field must be specified within thanosQuerierConfig.", + "nodeExporterConfig": "nodeExporterConfig is an optional field that can be used to configure the node-exporter agent that runs as a DaemonSet in the openshift-monitoring namespace. 
The node-exporter agent collects hardware and OS-level metrics from every node in the cluster. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", + "monitoringPluginConfig": "monitoringPluginConfig is an optional field that can be used to configure the monitoring plugin that runs as a dynamic plugin of the OpenShift web console. The monitoring plugin provides the monitoring UI in the OpenShift web console for visualizing metrics, alerts, and dashboards. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default deploys the monitoring-plugin as a single-replica Deployment on linux nodes with 10m CPU and 50Mi memory requests, and no custom tolerations or topology spread constraints. When set, at least one field must be specified within monitoringPluginConfig.", } func (ClusterMonitoringSpec) SwaggerDoc() map[string]string { @@ -196,7 +200,7 @@ func (ClusterMonitoringStatus) SwaggerDoc() map[string]string { var map_ContainerResource = map[string]string{ "": "ContainerResource defines a single resource requirement for a container.", "name": "name of the resource (e.g. \"cpu\", \"memory\", \"hugepages-2Mi\"). This field is required. name must consist only of alphanumeric characters, `-`, `_` and `.` and must start and end with an alphanumeric character.", - "request": "request is the minimum amount of the resource required (e.g. \"2Mi\", \"1Gi\"). This field is optional. When limit is specified, request cannot be greater than limit.", + "request": "request is the minimum amount of the resource required (e.g. \"2Mi\", \"1Gi\"). This field is optional. When limit is specified, request cannot be greater than limit. The value must be greater than 0 when specified.", "limit": "limit is the maximum amount of the resource allowed (e.g. \"2Mi\", \"1Gi\"). This field is optional. 
When request is specified, limit cannot be less than request. The value must be greater than 0 when specified.", } @@ -285,7 +289,7 @@ var map_MetricsServerConfig = map[string]string{ "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`.", "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. Minimum length for this list is 1.", "verbosity": "verbosity defines the verbosity of log messages for Metrics Server. Valid values are Errors, Info, Trace, TraceAll and omitted. When set to Errors, only critical messages and errors are logged. When set to Info, only basic information messages are logged. When set to Trace, information useful for general debugging is logged. When set to TraceAll, detailed information about metric scraping is logged. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `Errors`", - "resources": "resources defines the compute resource requests and limits for the Metrics Server container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. 
The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1. Each resource name must be unique within this list.", + "resources": "resources defines the compute resource requests and limits for the Metrics Server container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 5. Minimum length for this list is 1. Each resource name must be unique within this list.", "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Metrics Server Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. 
Entries must have unique topologyKey and whenUnsatisfiable pairs.", } @@ -293,6 +297,160 @@ func (MetricsServerConfig) SwaggerDoc() map[string]string { return map_MetricsServerConfig } +var map_MonitoringPluginConfig = map[string]string{ + "": "MonitoringPluginConfig provides configuration options for the monitoring plugin that runs as a dynamic plugin of the OpenShift web console. The monitoring plugin provides the monitoring UI in the OpenShift web console for visualizing metrics, alerts, and dashboards. At least one field must be specified; an empty monitoringPluginConfig object is not allowed.", + "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled. nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`. When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries.", + "resources": "resources defines the compute resource requests and limits for the monitoring-plugin container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 10m\n - name: memory\n request: 50Mi\n\nWhen specified, resources must contain at least 1 entry and must not exceed 5 entries.", + "tolerations": "tolerations defines the tolerations required for the monitoring-plugin Pods. This field is optional.\n\nWhen omitted, the monitoring-plugin Pods will not have any tolerations, which means they will only be scheduled on nodes with no taints. 
When specified, tolerations must contain at least 1 entry and must not contain more than 10 entries.", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how monitoring-plugin Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. When specified, this list must contain at least 1 entry and must not exceed 10 entries.", +} + +func (MonitoringPluginConfig) SwaggerDoc() map[string]string { + return map_MonitoringPluginConfig +} + +var map_NodeExporterCollectorBuddyInfoConfig = map[string]string{ + "": "NodeExporterCollectorBuddyInfoConfig provides configuration for the buddyinfo collector of the node-exporter agent. The buddyinfo collector collects statistics about memory fragmentation from the node_buddyinfo_blocks metric using data from /proc/buddyinfo. It is disabled by default.", + "collectionPolicy": "collectionPolicy declares whether the buddyinfo collector collects metrics. This field is required. Valid values are \"Collect\" and \"DoNotCollect\". When set to \"Collect\", the buddyinfo collector is active and memory fragmentation statistics are collected. When set to \"DoNotCollect\", the buddyinfo collector is inactive.", +} + +func (NodeExporterCollectorBuddyInfoConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorBuddyInfoConfig +} + +var map_NodeExporterCollectorConfig = map[string]string{ + "": "NodeExporterCollectorConfig defines settings for individual collectors of the node-exporter agent. Each collector can be individually set to collect or not collect metrics. 
At least one collector must be specified.", + "cpuFreq": "cpuFreq configures the cpufreq collector, which collects CPU frequency statistics. cpuFreq is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is disabled. Consider enabling when you need to observe CPU frequency scaling; expect higher CPU usage on many-core nodes when collectionPolicy is Collect.", + "tcpStat": "tcpStat configures the tcpstat collector, which collects TCP connection statistics. tcpStat is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is disabled. Enable when debugging TCP connection behavior or capacity at the node level.", + "ethtool": "ethtool configures the ethtool collector, which collects ethernet device statistics. ethtool is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is disabled. Enable when you need NIC driver-level ethtool metrics beyond generic netdev counters.", + "netDev": "netDev configures the netdev collector, which collects network device statistics. netDev is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is enabled. Turn off if you must reduce per-interface metric cardinality on hosts with many virtual interfaces.", + "netClass": "netClass configures the netclass collector, which collects information about network devices. netClass is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is enabled with netlink mode active. 
Use statsGatherer when sysfs vs netlink implementation matters or when matching node_exporter tuning.", + "buddyInfo": "buddyInfo configures the buddyinfo collector, which collects statistics about memory fragmentation from the node_buddyinfo_blocks metric. This metric collects data from /proc/buddyinfo. buddyInfo is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is disabled. Enable when investigating kernel memory fragmentation; typically for advanced troubleshooting only.", + "mountStats": "mountStats configures the mountstats collector, which collects statistics about NFS volume I/O activities. mountStats is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is disabled. Enabling this collector may produce metrics with high cardinality. If you enable this collector, closely monitor the prometheus-k8s deployment for excessive memory usage. Enable when you care about per-mount NFS client statistics.", + "ksmd": "ksmd configures the ksmd collector, which collects statistics from the kernel same-page merger daemon. ksmd is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is disabled. Enable on nodes where KSM is in use and you want visibility into merging activity.", + "processes": "processes configures the processes collector, which collects statistics from processes and threads running in the system. processes is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is disabled. 
Enable for process/thread-level insight; can be expensive on busy nodes.", + "systemd": "systemd configures the systemd collector, which collects statistics on the systemd daemon and its managed services. systemd is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is disabled. Enabling this collector with a long list of selected units may produce metrics with high cardinality. If you enable this collector, closely monitor the prometheus-k8s deployment for excessive memory usage. Enable when you need metrics for specific units; scope units carefully.", +} + +func (NodeExporterCollectorConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorConfig +} + +var map_NodeExporterCollectorCpufreqConfig = map[string]string{ + "": "NodeExporterCollectorCpufreqConfig provides configuration for the cpufreq collector of the node-exporter agent. The cpufreq collector collects CPU frequency statistics. It is disabled by default.", + "collectionPolicy": "collectionPolicy declares whether the cpufreq collector collects metrics. This field is required. Valid values are \"Collect\" and \"DoNotCollect\". When set to \"Collect\", the cpufreq collector is active and CPU frequency statistics are collected. When set to \"DoNotCollect\", the cpufreq collector is inactive.", +} + +func (NodeExporterCollectorCpufreqConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorCpufreqConfig +} + +var map_NodeExporterCollectorEthtoolConfig = map[string]string{ + "": "NodeExporterCollectorEthtoolConfig provides configuration for the ethtool collector of the node-exporter agent. The ethtool collector collects ethernet device statistics. It is disabled by default.", + "collectionPolicy": "collectionPolicy declares whether the ethtool collector collects metrics. This field is required. Valid values are \"Collect\" and \"DoNotCollect\". 
When set to \"Collect\", the ethtool collector is active and ethernet device statistics are collected. When set to \"DoNotCollect\", the ethtool collector is inactive.", +} + +func (NodeExporterCollectorEthtoolConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorEthtoolConfig +} + +var map_NodeExporterCollectorKSMDConfig = map[string]string{ + "": "NodeExporterCollectorKSMDConfig provides configuration for the ksmd collector of the node-exporter agent. The ksmd collector collects statistics from the kernel same-page merger daemon. It is disabled by default.", + "collectionPolicy": "collectionPolicy declares whether the ksmd collector collects metrics. This field is required. Valid values are \"Collect\" and \"DoNotCollect\". When set to \"Collect\", the ksmd collector is active and kernel same-page merger statistics are collected. When set to \"DoNotCollect\", the ksmd collector is inactive.", +} + +func (NodeExporterCollectorKSMDConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorKSMDConfig +} + +var map_NodeExporterCollectorMountStatsConfig = map[string]string{ + "": "NodeExporterCollectorMountStatsConfig provides configuration for the mountstats collector of the node-exporter agent. The mountstats collector collects statistics about NFS volume I/O activities. It is disabled by default. Enabling this collector may produce metrics with high cardinality. If you enable this collector, closely monitor the prometheus-k8s deployment for excessive memory usage.", + "collectionPolicy": "collectionPolicy declares whether the mountstats collector collects metrics. This field is required. Valid values are \"Collect\" and \"DoNotCollect\". When set to \"Collect\", the mountstats collector is active and NFS volume I/O statistics are collected. 
When set to \"DoNotCollect\", the mountstats collector is inactive.", +} + +func (NodeExporterCollectorMountStatsConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorMountStatsConfig +} + +var map_NodeExporterCollectorNetClassCollectConfig = map[string]string{ + "": "NodeExporterCollectorNetClassCollectConfig holds configuration options for the netclass collector when it is actively collecting metrics. At least one field must be specified.", + "statsGatherer": "statsGatherer selects which implementation the netclass collector uses to gather statistics (sysfs or netlink). statsGatherer is optional. Valid values are \"Sysfs\" and \"Netlink\". When set to \"Netlink\", the netlink implementation is used; when set to \"Sysfs\", the sysfs implementation is used. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is Netlink.", +} + +func (NodeExporterCollectorNetClassCollectConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorNetClassCollectConfig +} + +var map_NodeExporterCollectorNetClassConfig = map[string]string{ + "": "NodeExporterCollectorNetClassConfig provides configuration for the netclass collector of the node-exporter agent. The netclass collector collects information about network devices such as network speed, MTU, and carrier status. It is enabled by default. When collectionPolicy is DoNotCollect, the collect field must not be set.", + "collectionPolicy": "collectionPolicy declares whether the netclass collector collects metrics. This field is required. Valid values are \"Collect\" and \"DoNotCollect\". When set to \"Collect\", the netclass collector is active and network class information is collected. When set to \"DoNotCollect\", the netclass collector is inactive and the corresponding metrics become unavailable. 
When set to \"DoNotCollect\", the collect field must not be set.", + "collect": "collect contains configuration options that apply only when the netclass collector is actively collecting metrics (i.e. when collectionPolicy is Collect). collect is optional and may be omitted even when collectionPolicy is Collect. collect may only be set when collectionPolicy is Collect. When set, at least one field must be specified within collect.", +} + +func (NodeExporterCollectorNetClassConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorNetClassConfig +} + +var map_NodeExporterCollectorNetDevConfig = map[string]string{ + "": "NodeExporterCollectorNetDevConfig provides configuration for the netdev collector of the node-exporter agent. The netdev collector collects network device statistics such as bytes, packets, errors, and drops per device. It is enabled by default.", + "collectionPolicy": "collectionPolicy declares whether the netdev collector collects metrics. This field is required. Valid values are \"Collect\" and \"DoNotCollect\". When set to \"Collect\", the netdev collector is active and network device statistics are collected. When set to \"DoNotCollect\", the netdev collector is inactive and the corresponding metrics become unavailable.", +} + +func (NodeExporterCollectorNetDevConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorNetDevConfig +} + +var map_NodeExporterCollectorProcessesConfig = map[string]string{ + "": "NodeExporterCollectorProcessesConfig provides configuration for the processes collector of the node-exporter agent. The processes collector collects statistics from processes and threads running in the system. It is disabled by default.", + "collectionPolicy": "collectionPolicy declares whether the processes collector collects metrics. This field is required. Valid values are \"Collect\" and \"DoNotCollect\". 
When set to \"Collect\", the processes collector is active and process/thread statistics are collected. When set to \"DoNotCollect\", the processes collector is inactive.", +} + +func (NodeExporterCollectorProcessesConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorProcessesConfig +} + +var map_NodeExporterCollectorSystemdCollectConfig = map[string]string{ + "": "NodeExporterCollectorSystemdCollectConfig holds configuration options for the systemd collector when it is actively collecting metrics. At least one field must be specified.", + "units": "units is a list of regular expression patterns that match systemd units to be included by the systemd collector. units is optional. By default, the list is empty, so the collector exposes no metrics for systemd units. Each entry is a regular expression pattern and must be at least 1 character and at most 1024 characters. Maximum length for this list is 50. Minimum length for this list is 1. Entries in this list must be unique.", +} + +func (NodeExporterCollectorSystemdCollectConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorSystemdCollectConfig +} + +var map_NodeExporterCollectorSystemdConfig = map[string]string{ + "": "NodeExporterCollectorSystemdConfig provides configuration for the systemd collector of the node-exporter agent. The systemd collector collects statistics on the systemd daemon and its managed services. It is disabled by default. Enabling this collector with a long list of selected units may produce metrics with high cardinality. If you enable this collector, closely monitor the prometheus-k8s deployment for excessive memory usage. When collectionPolicy is DoNotCollect, the collect field must not be set.", + "collectionPolicy": "collectionPolicy declares whether the systemd collector collects metrics. This field is required. Valid values are \"Collect\" and \"DoNotCollect\". 
When set to \"Collect\", the systemd collector is active and systemd unit statistics are collected. When set to \"DoNotCollect\", the systemd collector is inactive and the collect field must not be set.", + "collect": "collect contains configuration options that apply only when the systemd collector is actively collecting metrics (i.e. when collectionPolicy is Collect). collect is optional and may be omitted even when collectionPolicy is Collect. collect may only be set when collectionPolicy is Collect. When set, at least one field must be specified within collect.", +} + +func (NodeExporterCollectorSystemdConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorSystemdConfig +} + +var map_NodeExporterCollectorTcpStatConfig = map[string]string{ + "": "NodeExporterCollectorTcpStatConfig provides configuration for the tcpstat collector of the node-exporter agent. The tcpstat collector collects TCP connection statistics. It is disabled by default.", + "collectionPolicy": "collectionPolicy declares whether the tcpstat collector collects metrics. This field is required. Valid values are \"Collect\" and \"DoNotCollect\". When set to \"Collect\", the tcpstat collector is active and TCP connection statistics are collected. When set to \"DoNotCollect\", the tcpstat collector is inactive.", +} + +func (NodeExporterCollectorTcpStatConfig) SwaggerDoc() map[string]string { + return map_NodeExporterCollectorTcpStatConfig +} + +var map_NodeExporterConfig = map[string]string{ + "": "NodeExporterConfig provides configuration options for the node-exporter agent that runs as a DaemonSet in the `openshift-monitoring` namespace. The node-exporter agent collects hardware and OS-level metrics from every node in the cluster, including CPU, memory, disk, and network statistics. At least one field must be specified.", + "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled. 
nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`. When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries.", + "resources": "resources defines the compute resource requests and limits for the node-exporter container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 8m\n limit: null\n - name: memory\n request: 32Mi\n limit: null", + "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is to tolerate all taints (operator: Exists without any key), which is typical for DaemonSets that must run on every node. Maximum length for this list is 10. Minimum length for this list is 1.", + "collectors": "collectors configures which node-exporter metric collectors are enabled. collectors is optional. Each collector can be individually enabled or disabled. Some collectors may have additional configuration options.\n\nWhen omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", + "maxProcs": "maxProcs sets the target number of CPUs on which the node-exporter process will run. maxProcs is optional. 
Use this setting to override the default value, which is set either to 4 or to the number of CPUs on the host, whichever is smaller. The default value is computed at runtime and set via the GOMAXPROCS environment variable before node-exporter is launched. If a kernel deadlock occurs or if performance degrades when reading from sysfs concurrently, you can change this value to 1, which limits node-exporter to running on one CPU. For nodes with a high CPU count, setting the limit to a low number saves resources by preventing Go routines from being scheduled to run on all CPUs. However, I/O performance degrades if the maxProcs value is set too low and there are many metrics to collect. The minimum value is 1 and the maximum value is 1024. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is min(4, number of host CPUs).", + "ignoredNetworkDevices": "ignoredNetworkDevices is a list of regular expression patterns that match network devices to be excluded from the relevant collector configuration such as netdev, netclass, and ethtool. ignoredNetworkDevices is optional.\n\nWhen omitted, the Cluster Monitoring Operator uses a predefined list of devices to be excluded to minimize the impact on memory usage. When set as an empty list, no devices are excluded. If you modify this setting, monitor the prometheus-k8s deployment closely for excessive memory usage. Maximum length for this list is 50. Each entry must be at least 1 character and at most 1024 characters long.", +} + +func (NodeExporterConfig) SwaggerDoc() map[string]string { + return map_NodeExporterConfig +} + var map_OAuth2 = map[string]string{ "": "OAuth2 defines OAuth2 authentication settings for the remote write endpoint.", "clientId": "clientId defines the secret reference containing the OAuth2 client ID. 
The secret must exist in the openshift-monitoring namespace.", @@ -319,7 +477,7 @@ func (OAuth2EndpointParam) SwaggerDoc() map[string]string { var map_OpenShiftStateMetricsConfig = map[string]string{ "": "OpenShiftStateMetricsConfig provides configuration options for the openshift-state-metrics agent that runs in the `openshift-monitoring` namespace. The openshift-state-metrics agent generates metrics about the state of OpenShift-specific Kubernetes objects, such as routes, builds, and deployments.", "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled. nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`. When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries.", - "resources": "resources defines the compute resource requests and limits for the openshift-state-metrics container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 1m\n limit: null\n - name: memory\n request: 32Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1. Each resource name must be unique within this list.", + "resources": "resources defines the compute resource requests and limits for the openshift-state-metrics container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. 
This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 1m\n limit: null\n - name: memory\n request: 32Mi\n limit: null\nMaximum length for this list is 5. Minimum length for this list is 1. Each resource name must be unique within this list.", "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. Minimum length for this list is 1.", "topologySpreadConstraints": "topologySpreadConstraints defines rules for how openshift-state-metrics Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. Entries must have unique topologyKey and whenUnsatisfiable pairs.", } @@ -337,7 +495,7 @@ var map_PrometheusConfig = map[string]string{ "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled. nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`. 
When specified, nodeSelector must contain at least one key-value pair (minimum of 1) and must not contain more than 10 entries.", "queryLogFile": "queryLogFile specifies the file to which PromQL queries are logged. This setting can be either a filename, in which case the queries are saved to an `emptyDir` volume at `/var/log/prometheus`, or a full path to a location where an `emptyDir` volume will be mounted and the queries saved. Writing to `/dev/stderr`, `/dev/stdout` or `/dev/null` is supported, but writing to any other `/dev/` path is not supported. Relative paths are also not supported. By default, PromQL queries are not logged. Must be an absolute path starting with `/` or a simple filename without path separators. Must not contain consecutive slashes, end with a slash, or include '..' path traversal. Must contain only alphanumeric characters, '.', '_', '-', or '/'. Must be between 1 and 255 characters in length.", "remoteWrite": "remoteWrite defines the remote write configuration, including URL, authentication, and relabeling settings. Remote write allows Prometheus to send metrics it collects to external long-term storage systems. When omitted, no remote write endpoints are configured. When provided, at least one configuration must be specified (minimum 1, maximum 10 items). Entries must have unique names (name is the list key).", - "resources": "resources defines the compute resource requests and limits for the Prometheus container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. Each entry must have a unique resource name. Minimum of 1 and maximum of 10 resource entries can be specified. The current default values are:\n resources:\n - name: cpu\n request: 4m\n - name: memory\n request: 40Mi", + "resources": "resources defines the compute resource requests and limits for the Prometheus container. 
This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 5. Minimum length for this list is 1. Each resource name must be unique within this list.", "retention": "retention configures how long Prometheus retains metrics data and how much storage it can use. When omitted, the platform chooses reasonable defaults (currently 15 days retention, no size limit).", "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10 Minimum length for this list is 1", "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Prometheus Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. 
Minimum length for this list is 1 Entries must have unique topologyKey and whenUnsatisfiable pairs.", @@ -351,7 +509,7 @@ func (PrometheusConfig) SwaggerDoc() map[string]string { var map_PrometheusOperatorAdmissionWebhookConfig = map[string]string{ "": "PrometheusOperatorAdmissionWebhookConfig provides configuration options for the admission webhook component of Prometheus Operator that runs in the `openshift-monitoring` namespace. The admission webhook validates PrometheusRule and AlertmanagerConfig objects, mutates PrometheusRule annotations, and converts AlertmanagerConfig objects between API versions.", - "resources": "resources defines the compute resource requests and limits for the prometheus-operator-admission-webhook container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 5m\n limit: null\n - name: memory\n request: 30Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1. Each resource name must be unique within this list.", + "resources": "resources defines the compute resource requests and limits for the prometheus-operator-admission-webhook container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. 
The current default values are:\n resources:\n - name: cpu\n request: 5m\n limit: null\n - name: memory\n request: 30Mi\n limit: null\nMaximum length for this list is 5. Minimum length for this list is 1. Each resource name must be unique within this list.", "topologySpreadConstraints": "topologySpreadConstraints defines rules for how admission webhook Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. Entries must have unique topologyKey and whenUnsatisfiable pairs.", } @@ -363,7 +521,7 @@ var map_PrometheusOperatorConfig = map[string]string{ "": "PrometheusOperatorConfig provides configuration options for the Prometheus Operator instance Use this configuration to control how the Prometheus Operator instance is deployed, how it logs, and how its pods are scheduled.", "logLevel": "logLevel defines the verbosity of logs emitted by Prometheus Operator. This field allows users to control the amount and severity of logs generated, which can be useful for debugging issues or reducing noise in production environments. Allowed values are Error, Warn, Info, and Debug. When set to Error, only errors will be logged. When set to Warn, both warnings and errors will be logged. When set to Info, general information, warnings, and errors will all be logged. When set to Debug, detailed debugging information will be logged. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. 
The current default value is `Info`.", "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`. When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries.", - "resources": "resources defines the compute resource requests and limits for the Prometheus Operator container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1. Each resource name must be unique within this list.", + "resources": "resources defines the compute resource requests and limits for the Prometheus Operator container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 5. Minimum length for this list is 1. 
Each resource name must be unique within this list.", "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. Minimum length for this list is 1.", "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Prometheus Operator Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. Entries must have unique topologyKey and whenUnsatisfiable pairs.", } @@ -516,6 +674,30 @@ func (TLSConfig) SwaggerDoc() map[string]string { return map_TLSConfig } +var map_TelemeterClientConfig = map[string]string{ + "": "TelemeterClientConfig provides configuration options for the Telemeter Client component that runs in the `openshift-monitoring` namespace. The Telemeter Client collects selected monitoring metrics and forwards them to Red Hat for telemetry purposes. At least one field must be specified.", + "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled. nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`. 
When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries.", + "resources": "resources defines the compute resource requests and limits for the Telemeter Client container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 1m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 5. Minimum length for this list is 1. Each resource name must be unique within this list.", + "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. Minimum length for this list is 1.", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Telemeter Client Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. 
Entries must have unique topologyKey and whenUnsatisfiable pairs.", +} + +func (TelemeterClientConfig) SwaggerDoc() map[string]string { + return map_TelemeterClientConfig +} + +var map_ThanosQuerierConfig = map[string]string{ + "": "ThanosQuerierConfig provides configuration options for the Thanos Querier component that runs in the `openshift-monitoring` namespace. At least one field must be specified; an empty thanosQuerierConfig object is not allowed.", + "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled. nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`. When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries.", + "resources": "resources defines the compute resource requests and limits for the Thanos Querier container. resources is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Requests cannot exceed limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 5m\n - name: memory\n request: 12Mi\nMaximum length for this list is 5. Minimum length for this list is 1. Each resource name must be unique within this list.", + "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. 
Minimum length for this list is 1.", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Thanos Querier Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Defaults are empty/unset. Maximum length for this list is 10. Minimum length for this list is 1. Entries must have unique topologyKey and whenUnsatisfiable pairs.", +} + +func (ThanosQuerierConfig) SwaggerDoc() map[string]string { + return map_ThanosQuerierConfig +} + var map_UppercaseActionConfig = map[string]string{ "": "UppercaseActionConfig configures the Uppercase action. Maps the concatenated source_labels to their upper case and writes to target_label. Requires Prometheus >= v2.36.0.", "targetLabel": "targetLabel is the label name where the upper-cased value is written. 
Must be between 1 and 128 characters in length.", diff --git a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go index 0160a4a242..c63db50d52 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go @@ -90,7 +90,6 @@ type ConsolePluginSpec struct { // OpenShift web console server CSP response header: // Content-Security-Policy: default-src 'self'; base-uri 'self'; script-src 'self' https://script1.com/ https://script2.com/ https://script3.com/; font-src 'self' https://font1.com/ https://font2.com/; img-src 'self' https://img1.com/; style-src 'self'; frame-src 'none'; object-src 'none' // - // +openshift:enable:FeatureGate=ConsolePluginContentSecurityPolicy // +kubebuilder:validation:MaxItems=5 // +kubebuilder:validation:XValidation:rule="self.map(x, x.values.map(y, y.size()).sum()).sum() < 8192",message="the total combined size of values of all directives must not exceed 8192 (8kb)" // +listType=map diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml index caa676e691..26524d0a17 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.featuregated-crd-manifests.yaml @@ -137,8 +137,7 @@ consoleplugins.console.openshift.io: CRDName: consoleplugins.console.openshift.io Capability: Console Category: "" - FeatureGates: - - ConsolePluginContentSecurityPolicy + FeatureGates: [] FilenameOperatorName: "" FilenameOperatorOrdering: "90" FilenameRunLevel: "" diff --git a/vendor/github.com/openshift/api/envtest-releases.yaml b/vendor/github.com/openshift/api/envtest-releases.yaml index 9ab6d63a6f..ea376ded55 100644 --- 
a/vendor/github.com/openshift/api/envtest-releases.yaml +++ b/vendor/github.com/openshift/api/envtest-releases.yaml @@ -1,4 +1,30 @@ releases: + v1.28.15: + envtest-v1.28.15-darwin-amd64.tar.gz: + hash: 79e04e7e264e6907da73d27c04d50be7bfa702b059e66f21331c9c8e16daa6ff960e750c1910804b7aa2a5bd79488a5bd082b76a7bab003e613a2cdbb2a8b80d + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.28.15-darwin-amd64.tar.gz + envtest-v1.28.15-darwin-arm64.tar.gz: + hash: 9646d169cf5161793ded60fa2ffde705d8fdde7c7c77833be9a77909f5804b909f28d56b2b0b93bd200d8057db3e37da9fe1683368ae38777ea62609135d0b4d + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.28.15-darwin-arm64.tar.gz + envtest-v1.28.15-linux-amd64.tar.gz: + hash: bc7e3deabbb3c7ee5e572e1392041af9d2fa40a51eb88dcabdaf57ff2a476e81efbd837a87f1f0849a5083dcb7c0b4b2fc16107da4cf3308805eee377fb383c1 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.28.15-linux-amd64.tar.gz + envtest-v1.28.15-linux-arm64.tar.gz: + hash: 17ec8d5f7de118b66d1621d8b25b4f0a891e4a641d5e845d14c0da4cb2e258b6bf788fe6b5334b27beb12d9e67f7c8f2c2ded6023bf9a7e79ac3b483b8cdfcc2 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.28.15-linux-arm64.tar.gz + v1.29.7: + envtest-v1.29.7-darwin-amd64.tar.gz: + hash: 4a97f9162ee882632aafda20e562dfb4011879f9ec6c8ffa4922e566933fc35292171cc837b50860e2e91238d820931a1fb8c3280541a180f6e9057178c71889 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.29.7-darwin-amd64.tar.gz + envtest-v1.29.7-darwin-arm64.tar.gz: + hash: e810e04c14b1bba79c90c755ecb8b13094132a86339e78d1fe39029b9825b90b663d538b29d6ab4d905c22728efdf278881ee3f434f2215bc77157dee90a2bde + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.29.7-darwin-arm64.tar.gz + envtest-v1.29.7-linux-amd64.tar.gz: + hash: 
91dcf683c95541691b8b9fdd332b0d3df56f8969b57c14797e990b9861ad71b2954f402913b5579f6afb99dce19817ba49542e2e276c7f70aeeb857a5ec1d57c + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.29.7-linux-amd64.tar.gz + envtest-v1.29.7-linux-arm64.tar.gz: + hash: 912333dee15e9cc068ebc5ef25591e16b9732cc70c23435efa72af352cffa9c25236d3982db910bd3a06172e9d57fc0d50585996f17c66ae1b80f0e2d3ef37b5 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.29.7-linux-arm64.tar.gz v1.30.3: envtest-v1.30.3-darwin-amd64.tar.gz: hash: 81ab2ad5841522976d9a5fc58642b745cf308230b0f2e634acfb2d5c8f288ef837f7b82144a5e91db607d86885101e06dd473a68bcac0d71be2297edc4aaa92e diff --git a/vendor/github.com/openshift/api/etcd/install.go b/vendor/github.com/openshift/api/etcd/install.go index 7e7474152c..659816d814 100644 --- a/vendor/github.com/openshift/api/etcd/install.go +++ b/vendor/github.com/openshift/api/etcd/install.go @@ -5,6 +5,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1alpha1 "github.com/openshift/api/etcd/v1alpha1" + v1 "github.com/openshift/api/etcd/v1" ) const ( @@ -12,7 +13,7 @@ const ( ) var ( - schemeBuilder = runtime.NewSchemeBuilder(v1alpha1.Install) + schemeBuilder = runtime.NewSchemeBuilder(v1.Install, v1alpha1.Install) // Install is a function which adds every version of this group to a scheme Install = schemeBuilder.AddToScheme ) diff --git a/vendor/github.com/openshift/api/etcd/v1/Makefile b/vendor/github.com/openshift/api/etcd/v1/Makefile new file mode 100644 index 0000000000..6fa6435a24 --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="etcd.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/etcd/v1/doc.go b/vendor/github.com/openshift/api/etcd/v1/doc.go new file mode 100644 index 0000000000..fe483fffd4 --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1/doc.go @@ -0,0 +1,6 @@ +// 
+k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +openshift:featuregated-schema-gen=true +// +groupName=etcd.openshift.io +package v1 diff --git a/vendor/github.com/openshift/api/etcd/v1/register.go b/vendor/github.com/openshift/api/etcd/v1/register.go new file mode 100644 index 0000000000..1b59263043 --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1/register.go @@ -0,0 +1,39 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "etcd.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &PacemakerCluster{}, + &PacemakerClusterList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/etcd/v1/types_pacemakercluster.go b/vendor/github.com/openshift/api/etcd/v1/types_pacemakercluster.go new file mode 100644 index 0000000000..a481f5e1bd --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1/types_pacemakercluster.go @@ -0,0 +1,737 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PacemakerCluster is used in Two Node OpenShift 
with Fencing deployments to monitor the health +// of etcd running under pacemaker. + +// Cluster-level condition types for PacemakerCluster.status.conditions +const ( + // ClusterHealthyConditionType tracks the overall health of the pacemaker cluster. + // This is an aggregate condition that reflects the health of all cluster-level conditions and node health. + // Specifically, it aggregates the following conditions: + // - ClusterInServiceConditionType + // - ClusterNodeCountAsExpectedConditionType + // - NodeHealthyConditionType (for each node) + // When True, the cluster is healthy with reason "ClusterHealthy". + // When False, the cluster is unhealthy with reason "ClusterUnhealthy". + ClusterHealthyConditionType = "Healthy" + + // ClusterInServiceConditionType tracks whether the cluster is in service (not in maintenance mode). + // Maintenance mode is a cluster-wide setting that prevents pacemaker from starting or stopping resources. + // When True, the cluster is in service with reason "InService". This is the normal operating state. + // When False, the cluster is in maintenance mode with reason "InMaintenance". This is an unexpected state. + ClusterInServiceConditionType = "InService" + + // ClusterNodeCountAsExpectedConditionType tracks whether the cluster has the expected number of nodes. + // For Two Node OpenShift with Fencing, we are expecting exactly 2 nodes. + // When True, the expected number of nodes are present with reason "AsExpected". + // When False, the node count is incorrect with reason "InsufficientNodes" or "ExcessiveNodes". + ClusterNodeCountAsExpectedConditionType = "NodeCountAsExpected" +) + +// ClusterHealthy condition reasons +const ( + // ClusterHealthyReasonHealthy means the pacemaker cluster is healthy and operating normally. + ClusterHealthyReasonHealthy = "ClusterHealthy" + + // ClusterHealthyReasonUnhealthy means the pacemaker cluster has issues that need investigation. 
+ ClusterHealthyReasonUnhealthy = "ClusterUnhealthy" +) + +// ClusterInService condition reasons +const ( + // ClusterInServiceReasonInService means the cluster is in service (not in maintenance mode). + // This is the normal operating state. + ClusterInServiceReasonInService = "InService" + + // ClusterInServiceReasonInMaintenance means the cluster is in maintenance mode. + // In maintenance mode, pacemaker will not start or stop any resources. Entering and exiting this state requires + // manual user intervention, and is unexpected during normal cluster operation. + ClusterInServiceReasonInMaintenance = "InMaintenance" +) + +// ClusterNodeCountAsExpected condition reasons +const ( + // ClusterNodeCountAsExpectedReasonAsExpected means the expected number of nodes are present. + // For Two Node OpenShift with Fencing, we are expecting exactly 2 nodes. This is the expected healthy state. + ClusterNodeCountAsExpectedReasonAsExpected = "AsExpected" + + // ClusterNodeCountAsExpectedReasonInsufficientNodes means fewer nodes than expected are present. + // For Two Node OpenShift with Fencing, this means that less than 2 nodes are present. Under normal operation, this will only happen during + // a node replacement operation. It's also possible to enter this state with manual user intervention, but + // will also require user intervention to restore normal functionality. + ClusterNodeCountAsExpectedReasonInsufficientNodes = "InsufficientNodes" + + // ClusterNodeCountAsExpectedReasonExcessiveNodes means more nodes than expected are present. + // For Two Node OpenShift with Fencing, this means more than 2 nodes are present. This should be investigated as it is unexpected and should + // never happen during normal cluster operation. It is possible to enter this state with manual user intervention, + // but will also require user intervention to restore normal functionality. 
+ ClusterNodeCountAsExpectedReasonExcessiveNodes = "ExcessiveNodes" +) + +// Node-level condition types for PacemakerCluster.status.nodes[].conditions +const ( + // NodeHealthyConditionType tracks the overall health of a node in the pacemaker cluster. + // This is an aggregate condition that reflects the health of all node-level conditions and resource health. + // Specifically, it aggregates the following conditions: + // - NodeOnlineConditionType + // - NodeInServiceConditionType + // - NodeActiveConditionType + // - NodeReadyConditionType + // - NodeCleanConditionType + // - NodeMemberConditionType + // - NodeFencingAvailableConditionType + // - NodeFencingHealthyConditionType + // - ResourceHealthyConditionType (for each resource in the node's resources list) + // When True, the node is healthy with reason "NodeHealthy". + // When False, the node is unhealthy with reason "NodeUnhealthy". + NodeHealthyConditionType = "Healthy" + + // NodeOnlineConditionType tracks whether a node is online. + // When True, the node is online with reason "Online". This is the normal operating state. + // When False, the node is offline with reason "Offline". This can occur during reboots, failures, maintenance, or replacement. + NodeOnlineConditionType = "Online" + + // NodeInServiceConditionType tracks whether a node is in service (not in maintenance mode). + // A node in maintenance mode is ignored by pacemaker while maintenance mode is active. + // When True, the node is in service with reason "InService". This is the normal operating state. + // When False, the node is in maintenance mode with reason "InMaintenance". This is an unexpected state. + NodeInServiceConditionType = "InService" + + // NodeActiveConditionType tracks whether a node is active (not in standby mode). + // When a node enters standby mode, pacemaker moves its resources to other nodes in the cluster. + // In Two Node OpenShift with Fencing, we do not use standby mode during normal operation. 
+ // When True, the node is active with reason "Active". This is the normal operating state. + // When False, the node is in standby mode with reason "Standby". This is an unexpected state. + NodeActiveConditionType = "Active" + + // NodeReadyConditionType tracks whether a node is ready (not in a pending state). + // A node in a pending state is in the process of joining or leaving the cluster. + // When True, the node is ready with reason "Ready". This is the normal operating state. + // When False, the node is pending with reason "Pending". This is expected to be temporary. + NodeReadyConditionType = "Ready" + + // NodeCleanConditionType tracks whether a node is in a clean state. + // An unclean state means that pacemaker was unable to confirm the node's state, which signifies issues + // in fencing, communication, or configuration. + // When True, the node is clean with reason "Clean". This is the normal operating state. + // When False, the node is unclean with reason "Unclean". This is an unexpected state. + NodeCleanConditionType = "Clean" + + // NodeMemberConditionType tracks whether a node is a member of the cluster. + // Some configurations may use remote nodes or ping nodes, which are nodes that are not members. + // For Two Node OpenShift with Fencing, we expect both nodes to be members. + // When True, the node is a member with reason "Member". This is the normal operating state. + // When False, the node is not a member with reason "NotMember". This is an unexpected state. + NodeMemberConditionType = "Member" + + // NodeFencingAvailableConditionType tracks whether a node can be fenced by at least one fencing agent. + // For Two Node OpenShift with Fencing, each node needs at least one healthy fencing agent to ensure + // that the cluster can recover from a node failure via STONITH (Shoot The Other Node In The Head). + // When True, at least one fencing agent is healthy with reason "FencingAvailable". 
+ // When False, all fencing agents are unhealthy with reason "FencingUnavailable". This is a critical + // state that should degrade the operator. + NodeFencingAvailableConditionType = "FencingAvailable" + + // NodeFencingHealthyConditionType tracks whether all fencing agents for a node are healthy. + // This is an aggregate condition that reflects the health of all fencing agents targeting this node. + // When True, all fencing agents are healthy with reason "FencingHealthy". + // When False, one or more fencing agents are unhealthy with reason "FencingUnhealthy". Warning events + // should be emitted for failing agents, but the operator should not be degraded if FencingAvailable is True. + NodeFencingHealthyConditionType = "FencingHealthy" +) + +// NodeHealthy condition reasons +const ( + // NodeHealthyReasonHealthy means the node is healthy and operating normally. + NodeHealthyReasonHealthy = "NodeHealthy" + + // NodeHealthyReasonUnhealthy means the node has issues that need investigation. + NodeHealthyReasonUnhealthy = "NodeUnhealthy" +) + +// NodeOnline condition reasons +const ( + // NodeOnlineReasonOnline means the node is online. This is the normal operating state. + NodeOnlineReasonOnline = "Online" + + // NodeOnlineReasonOffline means the node is offline. + NodeOnlineReasonOffline = "Offline" +) + +// NodeInService condition reasons +const ( + // NodeInServiceReasonInService means the node is in service (not in maintenance mode). + // This is the normal operating state. + NodeInServiceReasonInService = "InService" + + // NodeInServiceReasonInMaintenance means the node is in maintenance mode. + // This is an unexpected state. + NodeInServiceReasonInMaintenance = "InMaintenance" +) + +// NodeActive condition reasons +const ( + // NodeActiveReasonActive means the node is active (not in standby mode). + // This is the normal operating state. + NodeActiveReasonActive = "Active" + + // NodeActiveReasonStandby means the node is in standby mode. 
+ // This is an unexpected state. + NodeActiveReasonStandby = "Standby" +) + +// NodeReady condition reasons +const ( + // NodeReadyReasonReady means the node is ready (not in a pending state). + // This is the normal operating state. + NodeReadyReasonReady = "Ready" + + // NodeReadyReasonPending means the node is joining or leaving the cluster. + // This state is expected to be temporary. + NodeReadyReasonPending = "Pending" +) + +// NodeClean condition reasons +const ( + // NodeCleanReasonClean means the node is in a clean state. + // This is the normal operating state. + NodeCleanReasonClean = "Clean" + + // NodeCleanReasonUnclean means the node is in an unclean state. + // Pacemaker was unable to confirm the node's state, which signifies issues in fencing, communication, or configuration. + // This is an unexpected state. + NodeCleanReasonUnclean = "Unclean" +) + +// NodeMember condition reasons +const ( + // NodeMemberReasonMember means the node is a member of the cluster. + // For Two Node OpenShift with Fencing, we expect both nodes to be members. This is the normal operating state. + NodeMemberReasonMember = "Member" + + // NodeMemberReasonNotMember means the node is not a member of the cluster. + // This is an unexpected state. + NodeMemberReasonNotMember = "NotMember" +) + +// NodeFencingAvailable condition reasons +const ( + // NodeFencingAvailableReasonAvailable means at least one fencing agent for this node is healthy. + // The cluster can fence this node if needed. This is the normal operating state. + NodeFencingAvailableReasonAvailable = "FencingAvailable" + + // NodeFencingAvailableReasonUnavailable means all fencing agents for this node are unhealthy. + // The cluster cannot fence this node, which compromises high availability. + // This is a critical state that should degrade the operator. 
+ NodeFencingAvailableReasonUnavailable = "FencingUnavailable" +) + +// NodeFencingHealthy condition reasons +const ( + // NodeFencingHealthyReasonHealthy means all fencing agents for this node are healthy. + // This is the ideal operating state with full redundancy. + NodeFencingHealthyReasonHealthy = "FencingHealthy" + + // NodeFencingHealthyReasonUnhealthy means one or more fencing agents for this node are unhealthy. + // Warning events should be emitted for failing agents, but the operator should not be degraded + // if FencingAvailable is still True. + NodeFencingHealthyReasonUnhealthy = "FencingUnhealthy" +) + +// Resource-level condition types for PacemakerCluster.status.nodes[].resources[].conditions +const ( + // ResourceHealthyConditionType tracks the overall health of a pacemaker resource. + // This is an aggregate condition that reflects the health of all resource-level conditions. + // Specifically, it aggregates the following conditions: + // - ResourceInServiceConditionType + // - ResourceManagedConditionType + // - ResourceEnabledConditionType + // - ResourceOperationalConditionType + // - ResourceActiveConditionType + // - ResourceStartedConditionType + // - ResourceSchedulableConditionType + // When True, the resource is healthy with reason "ResourceHealthy". + // When False, the resource is unhealthy with reason "ResourceUnhealthy". + ResourceHealthyConditionType = "Healthy" + + // ResourceInServiceConditionType tracks whether a resource is in service (not in maintenance mode). + // Resources in maintenance mode are not monitored or moved by pacemaker. + // In Two Node OpenShift with Fencing, we do not expect any resources to be in maintenance mode. + // When True, the resource is in service with reason "InService". This is the normal operating state. + // When False, the resource is in maintenance mode with reason "InMaintenance". This is an unexpected state. 
+ ResourceInServiceConditionType = "InService" + + // ResourceManagedConditionType tracks whether a resource is managed by pacemaker. + // Resources that are not managed by pacemaker are effectively invisible to the pacemaker HA logic. + // For Two Node OpenShift with Fencing, all resources are expected to be managed. + // When True, the resource is managed with reason "Managed". This is the normal operating state. + // When False, the resource is not managed with reason "Unmanaged". This is an unexpected state. + ResourceManagedConditionType = "Managed" + + // ResourceEnabledConditionType tracks whether a resource is enabled. + // Resources that are disabled are stopped and not automatically managed or started by the cluster. + // In Two Node OpenShift with Fencing, we do not expect any resources to be disabled. + // When True, the resource is enabled with reason "Enabled". This is the normal operating state. + // When False, the resource is disabled with reason "Disabled". This is an unexpected state. + ResourceEnabledConditionType = "Enabled" + + // ResourceOperationalConditionType tracks whether a resource is operational (not failed). + // A failed resource is one that is not able to start or is in an error state. + // When True, the resource is operational with reason "Operational". This is the normal operating state. + // When False, the resource has failed with reason "Failed". This is an unexpected state. + ResourceOperationalConditionType = "Operational" + + // ResourceActiveConditionType tracks whether a resource is active. + // An active resource is running on a cluster node. + // In Two Node OpenShift with Fencing, all resources are expected to be active. + // When True, the resource is active with reason "Active". This is the normal operating state. + // When False, the resource is not active with reason "Inactive". This is an unexpected state. 
+ ResourceActiveConditionType = "Active" + + // ResourceStartedConditionType tracks whether a resource is started. + // It's normal for a resource like etcd to become stopped in the event of a quorum loss event because + // the pacemaker recovery logic will fence a node and restore etcd quorum on the surviving node as a cluster-of-one. + // A resource that stays stopped for an extended period of time is an unexpected state and should be investigated. + // When True, the resource is started with reason "Started". This is the normal operating state. + // When False, the resource is not started with reason "Stopped". This is expected to be temporary. + ResourceStartedConditionType = "Started" + + // ResourceSchedulableConditionType tracks whether a resource is schedulable (not blocked). + // A resource that is not schedulable is unable to start or move to a different node. + // In Two Node OpenShift with Fencing, we do not expect any resources to be unschedulable. + // When True, the resource is schedulable with reason "Schedulable". This is the normal operating state. + // When False, the resource is not schedulable with reason "Unschedulable". This is an unexpected state. + ResourceSchedulableConditionType = "Schedulable" +) + +// ResourceHealthy condition reasons +const ( + // ResourceHealthyReasonHealthy means the resource is healthy and operating normally. + ResourceHealthyReasonHealthy = "ResourceHealthy" + + // ResourceHealthyReasonUnhealthy means the resource has issues that need investigation. + ResourceHealthyReasonUnhealthy = "ResourceUnhealthy" +) + +// ResourceInService condition reasons +const ( + // ResourceInServiceReasonInService means the resource is in service (not in maintenance mode). + // This is the normal operating state. + ResourceInServiceReasonInService = "InService" + + // ResourceInServiceReasonInMaintenance means the resource is in maintenance mode. + // Resources in maintenance mode are not monitored or moved by pacemaker. 
+	// This is an unexpected state.
+	ResourceInServiceReasonInMaintenance = "InMaintenance"
+)
+
+// ResourceManaged condition reasons
+const (
+	// ResourceManagedReasonManaged means the resource is managed by pacemaker.
+	// This is the normal operating state.
+	ResourceManagedReasonManaged = "Managed"
+
+	// ResourceManagedReasonUnmanaged means the resource is not managed by pacemaker.
+	// Resources that are not managed by pacemaker are effectively invisible to the pacemaker HA logic.
+	// This is an unexpected state.
+	ResourceManagedReasonUnmanaged = "Unmanaged"
+)
+
+// ResourceEnabled condition reasons
+const (
+	// ResourceEnabledReasonEnabled means the resource is enabled.
+	// This is the normal operating state.
+	ResourceEnabledReasonEnabled = "Enabled"
+
+	// ResourceEnabledReasonDisabled means the resource is disabled.
+	// Resources that are disabled are stopped and not automatically managed or started by the cluster.
+	// This is an unexpected state.
+	ResourceEnabledReasonDisabled = "Disabled"
+)
+
+// ResourceOperational condition reasons
+const (
+	// ResourceOperationalReasonOperational means the resource is operational (not failed).
+	// This is the normal operating state.
+	ResourceOperationalReasonOperational = "Operational"
+
+	// ResourceOperationalReasonFailed means the resource has failed.
+	// A failed resource is one that is not able to start or is in an error state. This is an unexpected state.
+	ResourceOperationalReasonFailed = "Failed"
+)
+
+// ResourceActive condition reasons
+const (
+	// ResourceActiveReasonActive means the resource is active.
+	// An active resource is running on a cluster node. This is the normal operating state.
+	ResourceActiveReasonActive = "Active"
+
+	// ResourceActiveReasonInactive means the resource is not active.
+	// This is an unexpected state.
+	ResourceActiveReasonInactive = "Inactive"
+)
+
+// ResourceStarted condition reasons
+const (
+	// ResourceStartedReasonStarted means the resource is started.
+ // This is the normal operating state. + ResourceStartedReasonStarted = "Started" + + // ResourceStartedReasonStopped means the resource is stopped. + // It's normal for a resource like etcd to become stopped in the event of a quorum loss event because + // the pacemaker recovery logic will fence a node and restore etcd quorum on the surviving node as a cluster-of-one. + // A resource that stays stopped for an extended period of time is an unexpected state and should be investigated. + ResourceStartedReasonStopped = "Stopped" +) + +// ResourceSchedulable condition reasons +const ( + // ResourceSchedulableReasonSchedulable means the resource is schedulable (not blocked). + // This is the normal operating state. + ResourceSchedulableReasonSchedulable = "Schedulable" + + // ResourceSchedulableReasonUnschedulable means the resource is not schedulable (blocked). + // A resource that is not schedulable is unable to start or move to a different node. This is an unexpected state. + ResourceSchedulableReasonUnschedulable = "Unschedulable" +) + +// PacemakerNodeAddressType represents the type of a node address. +// Currently only InternalIP is supported. +// +kubebuilder:validation:Enum=InternalIP +// +enum +type PacemakerNodeAddressType string + +const ( + // PacemakerNodeInternalIP is an internal IP address assigned to the node. + // This is typically the IP address used for intra-cluster communication. + PacemakerNodeInternalIP PacemakerNodeAddressType = "InternalIP" +) + +// PacemakerNodeAddress contains information for a node's address. +// This is similar to corev1.NodeAddress but adds validation for IP addresses. +type PacemakerNodeAddress struct { + // type is the type of node address. + // Currently only "InternalIP" is supported. + // +required + Type PacemakerNodeAddressType `json:"type,omitempty"` + + // address is the node address. + // For InternalIP, this must be a valid global unicast IPv4 or IPv6 address in canonical form. 
+ // Canonical form means the shortest standard representation (e.g., "192.168.1.1" not "192.168.001.001", + // or "2001:db8::1" not "2001:0db8::1"). Maximum length is 39 characters (full IPv6 address). + // Global unicast includes private/RFC1918 addresses but excludes loopback, link-local, and multicast. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=39 + // +kubebuilder:validation:XValidation:rule="isIP(self) && ip.isCanonical(self) && ip(self).isGlobalUnicast()",message="must be a valid global unicast IPv4 or IPv6 address in canonical form" + // +required + Address string `json:"address,omitempty"` +} + +// PacemakerClusterResourceName represents the name of a pacemaker resource. +// Fencing agents are tracked separately in the fencingAgents field. +// +kubebuilder:validation:Enum=Kubelet;Etcd +// +enum +type PacemakerClusterResourceName string + +// PacemakerClusterResourceName values +const ( + // PacemakerClusterResourceNameKubelet is the kubelet pacemaker resource. + // The kubelet resource is a prerequisite for etcd in Two Node OpenShift with Fencing deployments. + PacemakerClusterResourceNameKubelet PacemakerClusterResourceName = "Kubelet" + + // PacemakerClusterResourceNameEtcd is the etcd pacemaker resource. + // The etcd resource may temporarily transition to stopped during pacemaker quorum-recovery operations. + PacemakerClusterResourceNameEtcd PacemakerClusterResourceName = "Etcd" +) + +// FencingMethod represents the method used by a fencing agent to isolate failed nodes. +// Valid values are "Redfish" and "IPMI". +// +kubebuilder:validation:Enum=Redfish;IPMI +// +enum +type FencingMethod string + +// FencingMethod values +const ( + // FencingMethodRedfish uses Redfish, a standard RESTful API for server management. + FencingMethodRedfish FencingMethod = "Redfish" + + // FencingMethodIPMI uses IPMI (Intelligent Platform Management Interface), a hardware management interface. 
+ FencingMethodIPMI FencingMethod = "IPMI" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// PacemakerCluster represents the current state of the pacemaker cluster as reported by the pcs status command. +// PacemakerCluster is a cluster-scoped singleton resource. The name of this instance is "cluster". This +// resource provides a view into the health and status of a pacemaker-managed cluster in Two Node OpenShift with Fencing deployments. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=pacemakerclusters,scope=Cluster,singular=pacemakercluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2544 +// +openshift:file-pattern=cvoRunLevel=0000_25,operatorName=etcd,operatorOrdering=01,operatorComponent=two-node-fencing +// +openshift:enable:FeatureGate=DualReplica +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="PacemakerCluster must be named 'cluster'" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.status) || has(self.status)",message="status may not be removed once set" +type PacemakerCluster struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ObjectMeta `json:"metadata,omitempty"` + + // status contains the actual pacemaker cluster status information collected from the cluster. + // The goal of this status is to be able to quickly identify if pacemaker is in a healthy state. + // In Two Node OpenShift with Fencing, a healthy pacemaker cluster has 2 nodes, both of which have healthy kubelet, etcd, and fencing resources. 
+ // This field is optional on creation - the status collector populates it immediately after creating + // the resource via the status subresource. + // +optional + Status PacemakerClusterStatus `json:"status,omitzero"` +} + +// PacemakerClusterStatus contains the actual pacemaker cluster status information. As part of validating the status +// object, we need to ensure that the lastUpdated timestamp may not be set to an earlier timestamp than the current value. +// The validation rule checks if oldSelf has lastUpdated before comparing, to handle the initial status creation case. +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.lastUpdated) || self.lastUpdated >= oldSelf.lastUpdated",message="lastUpdated may not be set to an earlier timestamp" +type PacemakerClusterStatus struct { + // conditions represent the observations of the pacemaker cluster's current state. + // Known condition types are: "Healthy", "InService", "NodeCountAsExpected". + // The "Healthy" condition is an aggregate that tracks the overall health of the cluster. + // The "InService" condition tracks whether the cluster is in service (not in maintenance mode). + // The "NodeCountAsExpected" condition tracks whether the expected number of nodes are present. + // Each of these conditions is required, so the array must contain at least 3 items. 
+ // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=3 + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Healthy')",message="conditions must contain a condition of type Healthy" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'InService')",message="conditions must contain a condition of type InService" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'NodeCountAsExpected')",message="conditions must contain a condition of type NodeCountAsExpected" + // +required + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // lastUpdated is the timestamp when this status was last updated. This is useful for identifying + // stale status reports. It must be a valid timestamp in RFC3339 format. Once set, this field cannot + // be removed and cannot be set to an earlier timestamp than the current value. + // +kubebuilder:validation:Format=date-time + // +required + LastUpdated metav1.Time `json:"lastUpdated,omitempty,omitzero"` + + // nodes provides detailed status for each control-plane node in the Pacemaker cluster. + // While Pacemaker supports up to 32 nodes, the limit is set to 5 (max OpenShift control-plane nodes). + // For Two Node OpenShift with Fencing, exactly 2 nodes are expected in a healthy cluster. + // An empty list indicates a catastrophic failure where Pacemaker reports no nodes. + // +listType=map + // +listMapKey=nodeName + // +kubebuilder:validation:MinItems=0 + // +kubebuilder:validation:MaxItems=5 + // +required + Nodes *[]PacemakerClusterNodeStatus `json:"nodes,omitempty"` +} + +// PacemakerClusterNodeStatus represents the status of a single node in the pacemaker cluster including +// the node's conditions and the health of critical resources running on that node. +type PacemakerClusterNodeStatus struct { + // conditions represent the observations of the node's current state. 
+ // Known condition types are: "Healthy", "Online", "InService", "Active", "Ready", "Clean", "Member", + // "FencingAvailable", "FencingHealthy". + // The "Healthy" condition is an aggregate that tracks the overall health of the node. + // The "Online" condition tracks whether the node is online. + // The "InService" condition tracks whether the node is in service (not in maintenance mode). + // The "Active" condition tracks whether the node is active (not in standby mode). + // The "Ready" condition tracks whether the node is ready (not in a pending state). + // The "Clean" condition tracks whether the node is in a clean (status known) state. + // The "Member" condition tracks whether the node is a member of the cluster. + // The "FencingAvailable" condition tracks whether this node can be fenced by at least one healthy agent. + // The "FencingHealthy" condition tracks whether all fencing agents for this node are healthy. + // Each of these conditions is required, so the array must contain at least 9 items. 
+ // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=9 + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Healthy')",message="conditions must contain a condition of type Healthy" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Online')",message="conditions must contain a condition of type Online" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'InService')",message="conditions must contain a condition of type InService" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Active')",message="conditions must contain a condition of type Active" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Ready')",message="conditions must contain a condition of type Ready" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Clean')",message="conditions must contain a condition of type Clean" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Member')",message="conditions must contain a condition of type Member" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'FencingAvailable')",message="conditions must contain a condition of type FencingAvailable" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'FencingHealthy')",message="conditions must contain a condition of type FencingHealthy" + // +required + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // nodeName is the name of the node. This is expected to match the Kubernetes node's name, which must be a lowercase + // RFC 1123 subdomain consisting of lowercase alphanumeric characters, '-' or '.', starting and ending with + // an alphanumeric character, and be at most 253 characters in length. 
+ // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="nodeName must be a lowercase RFC 1123 subdomain consisting of lowercase alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + // +required + NodeName string `json:"nodeName,omitempty"` + + // addresses is a list of IP addresses for the node. + // Pacemaker allows multiple IP addresses for Corosync communication between nodes. + // The first address in this list is used for IP-based peer URLs for etcd membership. + // Each address must be a valid global unicast IPv4 or IPv6 address in canonical form + // (e.g., "192.168.1.1" not "192.168.001.001", or "2001:db8::1" not "2001:0db8::1"). + // This excludes loopback, link-local, and multicast addresses. + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=8 + // +required + Addresses []PacemakerNodeAddress `json:"addresses,omitempty"` + + // resources contains the status of pacemaker resources scheduled on this node. + // Each resource entry includes the resource name and its health conditions. + // For Two Node OpenShift with Fencing, we track Kubelet and Etcd resources per node. + // Both resources are required to be present, so the array must contain at least 2 items. + // Valid resource names are "Kubelet" and "Etcd". + // Fencing agents are tracked separately in the fencingAgents field. 
+ // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MinItems=2 + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:XValidation:rule="self.exists(r, r.name == 'Kubelet')",message="resources must contain a resource named Kubelet" + // +kubebuilder:validation:XValidation:rule="self.exists(r, r.name == 'Etcd')",message="resources must contain a resource named Etcd" + // +required + Resources []PacemakerClusterResourceStatus `json:"resources,omitempty"` + + // fencingAgents contains the status of fencing agents that can fence this node. + // Unlike resources (which are scheduled to run on this node), fencing agents are mapped + // to the node they can fence (their target), not the node where monitoring operations run. + // Each fencing agent entry includes a unique name, fencing type, target node, and health conditions. + // A node is considered fence-capable if at least one fencing agent is healthy. + // A healthy node is expected to have at least 1 fencing agent, but the list may be empty + // when fencing agent discovery fails. + // Names must be unique within this array. + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MinItems=0 + // +kubebuilder:validation:MaxItems=8 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.name == y.name))",message="fencing agent names must be unique" + // +required + FencingAgents []PacemakerClusterFencingAgentStatus `json:"fencingAgents,omitempty"` +} + +// PacemakerClusterFencingAgentStatus represents the status of a fencing agent that can fence a node. +// Fencing agents are STONITH (Shoot The Other Node In The Head) devices used to isolate failed nodes. +// Unlike regular pacemaker resources, fencing agents are mapped to their target node (the node they +// can fence), not the node where their monitoring operations are scheduled. 
+type PacemakerClusterFencingAgentStatus struct { + // conditions represent the observations of the fencing agent's current state. + // Known condition types are: "Healthy", "InService", "Managed", "Enabled", "Operational", + // "Active", "Started", "Schedulable". + // The "Healthy" condition is an aggregate that tracks the overall health of the fencing agent. + // The "InService" condition tracks whether the fencing agent is in service (not in maintenance mode). + // The "Managed" condition tracks whether the fencing agent is managed by pacemaker. + // The "Enabled" condition tracks whether the fencing agent is enabled. + // The "Operational" condition tracks whether the fencing agent is operational (not failed). + // The "Active" condition tracks whether the fencing agent is active (available to be used). + // The "Started" condition tracks whether the fencing agent is started. + // The "Schedulable" condition tracks whether the fencing agent is schedulable (not blocked). + // Each of these conditions is required, so the array must contain at least 8 items. 
+	// +listType=map
+	// +listMapKey=type
+	// +kubebuilder:validation:MinItems=8
+	// +kubebuilder:validation:MaxItems=16
+	// +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Healthy')",message="conditions must contain a condition of type Healthy"
+	// +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'InService')",message="conditions must contain a condition of type InService"
+	// +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Managed')",message="conditions must contain a condition of type Managed"
+	// +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Enabled')",message="conditions must contain a condition of type Enabled"
+	// +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Operational')",message="conditions must contain a condition of type Operational"
+	// +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Active')",message="conditions must contain a condition of type Active"
+	// +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Started')",message="conditions must contain a condition of type Started"
+	// +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Schedulable')",message="conditions must contain a condition of type Schedulable"
+	// +required
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+	// name is the unique identifier for this fencing agent (e.g., "master-0_redfish").
+	// The name must be unique within the fencingAgents array for this node.
+	// It may contain alphanumeric characters, dots, hyphens, and underscores.
+	// Maximum length is 300 characters, providing headroom beyond the typical format of
+	// <nodeName>_<type> (253 for RFC 1123 node name + 1 underscore + type).
+ // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=300 + // +kubebuilder:validation:XValidation:rule="self.matches('^[a-zA-Z0-9._-]+$')",message="name must contain only alphanumeric characters, dots, hyphens, and underscores" + // +required + Name string `json:"name,omitempty"` + + // method is the fencing method used by this agent. + // Valid values are "Redfish" and "IPMI". + // Redfish is a standard RESTful API for server management. + // IPMI (Intelligent Platform Management Interface) is a hardware management interface. + // +required + Method FencingMethod `json:"method,omitempty"` +} + +// PacemakerClusterResourceStatus represents the status of a pacemaker resource scheduled on a node. +// A pacemaker resource is a unit of work managed by pacemaker. In pacemaker terminology, resources are services or +// applications that pacemaker monitors, starts, stops, and moves between nodes to maintain high availability. +// For Two Node OpenShift with Fencing, we track two resources per node: +// - Kubelet (the Kubernetes node agent and a prerequisite for etcd) +// - Etcd (the distributed key-value store) +// +// Fencing agents are tracked separately in the fencingAgents field because they are mapped to +// their target node (the node they can fence), not the node where monitoring operations are scheduled. +type PacemakerClusterResourceStatus struct { + // conditions represent the observations of the resource's current state. + // Known condition types are: "Healthy", "InService", "Managed", "Enabled", "Operational", + // "Active", "Started", "Schedulable". + // The "Healthy" condition is an aggregate that tracks the overall health of the resource. + // The "InService" condition tracks whether the resource is in service (not in maintenance mode). + // The "Managed" condition tracks whether the resource is managed by pacemaker. + // The "Enabled" condition tracks whether the resource is enabled. 
+ // The "Operational" condition tracks whether the resource is operational (not failed). + // The "Active" condition tracks whether the resource is active (available to be used). + // The "Started" condition tracks whether the resource is started. + // The "Schedulable" condition tracks whether the resource is schedulable (not blocked). + // Each of these conditions is required, so the array must contain at least 8 items. + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=8 + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Healthy')",message="conditions must contain a condition of type Healthy" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'InService')",message="conditions must contain a condition of type InService" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Managed')",message="conditions must contain a condition of type Managed" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Enabled')",message="conditions must contain a condition of type Enabled" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Operational')",message="conditions must contain a condition of type Operational" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Active')",message="conditions must contain a condition of type Active" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Started')",message="conditions must contain a condition of type Started" + // +kubebuilder:validation:XValidation:rule="self.exists(c, c.type == 'Schedulable')",message="conditions must contain a condition of type Schedulable" + // +required + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // name is the name of the pacemaker resource. + // Valid values are "Kubelet" and "Etcd". 
+ // The Kubelet resource is a prerequisite for etcd in Two Node OpenShift with Fencing deployments. + // The Etcd resource may temporarily transition to stopped during pacemaker quorum-recovery operations. + // Fencing agents are tracked separately in the node's fencingAgents field. + // +required + Name PacemakerClusterResourceName `json:"name,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// PacemakerClusterList contains a list of PacemakerCluster objects. PacemakerCluster is a cluster-scoped singleton +// resource; only one instance named "cluster" may exist. This list type exists only to satisfy Kubernetes API +// conventions. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type PacemakerClusterList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + // items is a list of PacemakerCluster objects. + Items []PacemakerCluster `json:"items"` +} diff --git a/vendor/github.com/openshift/api/etcd/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/etcd/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..c529240e40 --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1/zz_generated.deepcopy.go @@ -0,0 +1,210 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by codegen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PacemakerCluster) DeepCopyInto(out *PacemakerCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerCluster. +func (in *PacemakerCluster) DeepCopy() *PacemakerCluster { + if in == nil { + return nil + } + out := new(PacemakerCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacemakerCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacemakerClusterFencingAgentStatus) DeepCopyInto(out *PacemakerClusterFencingAgentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerClusterFencingAgentStatus. +func (in *PacemakerClusterFencingAgentStatus) DeepCopy() *PacemakerClusterFencingAgentStatus { + if in == nil { + return nil + } + out := new(PacemakerClusterFencingAgentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PacemakerClusterList) DeepCopyInto(out *PacemakerClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PacemakerCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerClusterList. +func (in *PacemakerClusterList) DeepCopy() *PacemakerClusterList { + if in == nil { + return nil + } + out := new(PacemakerClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PacemakerClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacemakerClusterNodeStatus) DeepCopyInto(out *PacemakerClusterNodeStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]PacemakerNodeAddress, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]PacemakerClusterResourceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FencingAgents != nil { + in, out := &in.FencingAgents, &out.FencingAgents + *out = make([]PacemakerClusterFencingAgentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerClusterNodeStatus. 
+func (in *PacemakerClusterNodeStatus) DeepCopy() *PacemakerClusterNodeStatus { + if in == nil { + return nil + } + out := new(PacemakerClusterNodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacemakerClusterResourceStatus) DeepCopyInto(out *PacemakerClusterResourceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerClusterResourceStatus. +func (in *PacemakerClusterResourceStatus) DeepCopy() *PacemakerClusterResourceStatus { + if in == nil { + return nil + } + out := new(PacemakerClusterResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacemakerClusterStatus) DeepCopyInto(out *PacemakerClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.LastUpdated.DeepCopyInto(&out.LastUpdated) + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = new([]PacemakerClusterNodeStatus) + if **in != nil { + in, out := *in, *out + *out = make([]PacemakerClusterNodeStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerClusterStatus. 
+func (in *PacemakerClusterStatus) DeepCopy() *PacemakerClusterStatus { + if in == nil { + return nil + } + out := new(PacemakerClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PacemakerNodeAddress) DeepCopyInto(out *PacemakerNodeAddress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PacemakerNodeAddress. +func (in *PacemakerNodeAddress) DeepCopy() *PacemakerNodeAddress { + if in == nil { + return nil + } + out := new(PacemakerNodeAddress) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/etcd/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/etcd/v1/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000..1a11a877c4 --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,23 @@ +pacemakerclusters.etcd.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2544 + CRDName: pacemakerclusters.etcd.openshift.io + Capability: "" + Category: "" + FeatureGates: + - DualReplica + FilenameOperatorName: etcd + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_25" + GroupName: etcd.openshift.io + HasStatus: true + KindName: PacemakerCluster + Labels: {} + PluralName: pacemakerclusters + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - DualReplica + Version: v1 + diff --git a/vendor/github.com/openshift/api/etcd/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/etcd/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000..e9e47b47cf --- /dev/null +++ b/vendor/github.com/openshift/api/etcd/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,89 @@ +package v1 + +// This file contains a collection of 
methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_PacemakerCluster = map[string]string{ + "": "PacemakerCluster represents the current state of the pacemaker cluster as reported by the pcs status command. PacemakerCluster is a cluster-scoped singleton resource. The name of this instance is \"cluster\". This resource provides a view into the health and status of a pacemaker-managed cluster in Two Node OpenShift with Fencing deployments.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "status contains the actual pacemaker cluster status information collected from the cluster. The goal of this status is to be able to quickly identify if pacemaker is in a healthy state. In Two Node OpenShift with Fencing, a healthy pacemaker cluster has 2 nodes, both of which have healthy kubelet, etcd, and fencing resources. This field is optional on creation - the status collector populates it immediately after creating the resource via the status subresource.", +} + +func (PacemakerCluster) SwaggerDoc() map[string]string { + return map_PacemakerCluster +} + +var map_PacemakerClusterFencingAgentStatus = map[string]string{ + "": "PacemakerClusterFencingAgentStatus represents the status of a fencing agent that can fence a node. 
Fencing agents are STONITH (Shoot The Other Node In The Head) devices used to isolate failed nodes. Unlike regular pacemaker resources, fencing agents are mapped to their target node (the node they can fence), not the node where their monitoring operations are scheduled.", + "conditions": "conditions represent the observations of the fencing agent's current state. Known condition types are: \"Healthy\", \"InService\", \"Managed\", \"Enabled\", \"Operational\", \"Active\", \"Started\", \"Schedulable\". The \"Healthy\" condition is an aggregate that tracks the overall health of the fencing agent. The \"InService\" condition tracks whether the fencing agent is in service (not in maintenance mode). The \"Managed\" condition tracks whether the fencing agent is managed by pacemaker. The \"Enabled\" condition tracks whether the fencing agent is enabled. The \"Operational\" condition tracks whether the fencing agent is operational (not failed). The \"Active\" condition tracks whether the fencing agent is active (available to be used). The \"Started\" condition tracks whether the fencing agent is started. The \"Schedulable\" condition tracks whether the fencing agent is schedulable (not blocked). Each of these conditions is required, so the array must contain at least 8 items.", + "name": "name is the unique identifier for this fencing agent (e.g., \"master-0_redfish\"). The name must be unique within the fencingAgents array for this node. It may contain alphanumeric characters, dots, hyphens, and underscores. Maximum length is 300 characters, providing headroom beyond the typical format of _ (253 for RFC 1123 node name + 1 underscore + type).", + "method": "method is the fencing method used by this agent. Valid values are \"Redfish\" and \"IPMI\". Redfish is a standard RESTful API for server management. 
IPMI (Intelligent Platform Management Interface) is a hardware management interface.", +} + +func (PacemakerClusterFencingAgentStatus) SwaggerDoc() map[string]string { + return map_PacemakerClusterFencingAgentStatus +} + +var map_PacemakerClusterList = map[string]string{ + "": "PacemakerClusterList contains a list of PacemakerCluster objects. PacemakerCluster is a cluster-scoped singleton resource; only one instance named \"cluster\" may exist. This list type exists only to satisfy Kubernetes API conventions.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of PacemakerCluster objects.", +} + +func (PacemakerClusterList) SwaggerDoc() map[string]string { + return map_PacemakerClusterList +} + +var map_PacemakerClusterNodeStatus = map[string]string{ + "": "PacemakerClusterNodeStatus represents the status of a single node in the pacemaker cluster including the node's conditions and the health of critical resources running on that node.", + "conditions": "conditions represent the observations of the node's current state. Known condition types are: \"Healthy\", \"Online\", \"InService\", \"Active\", \"Ready\", \"Clean\", \"Member\", \"FencingAvailable\", \"FencingHealthy\". The \"Healthy\" condition is an aggregate that tracks the overall health of the node. The \"Online\" condition tracks whether the node is online. The \"InService\" condition tracks whether the node is in service (not in maintenance mode). The \"Active\" condition tracks whether the node is active (not in standby mode). The \"Ready\" condition tracks whether the node is ready (not in a pending state). The \"Clean\" condition tracks whether the node is in a clean (status known) state. 
The \"Member\" condition tracks whether the node is a member of the cluster. The \"FencingAvailable\" condition tracks whether this node can be fenced by at least one healthy agent. The \"FencingHealthy\" condition tracks whether all fencing agents for this node are healthy. Each of these conditions is required, so the array must contain at least 9 items.", + "nodeName": "nodeName is the name of the node. This is expected to match the Kubernetes node's name, which must be a lowercase RFC 1123 subdomain consisting of lowercase alphanumeric characters, '-' or '.', starting and ending with an alphanumeric character, and be at most 253 characters in length.", + "addresses": "addresses is a list of IP addresses for the node. Pacemaker allows multiple IP addresses for Corosync communication between nodes. The first address in this list is used for IP-based peer URLs for etcd membership. Each address must be a valid global unicast IPv4 or IPv6 address in canonical form (e.g., \"192.168.1.1\" not \"192.168.001.001\", or \"2001:db8::1\" not \"2001:0db8::1\"). This excludes loopback, link-local, and multicast addresses.", + "resources": "resources contains the status of pacemaker resources scheduled on this node. Each resource entry includes the resource name and its health conditions. For Two Node OpenShift with Fencing, we track Kubelet and Etcd resources per node. Both resources are required to be present, so the array must contain at least 2 items. Valid resource names are \"Kubelet\" and \"Etcd\". Fencing agents are tracked separately in the fencingAgents field.", + "fencingAgents": "fencingAgents contains the status of fencing agents that can fence this node. Unlike resources (which are scheduled to run on this node), fencing agents are mapped to the node they can fence (their target), not the node where monitoring operations run. Each fencing agent entry includes a unique name, fencing type, target node, and health conditions. 
A node is considered fence-capable if at least one fencing agent is healthy. A healthy node is expected to have at least 1 fencing agent, but the list may be empty when fencing agent discovery fails. Names must be unique within this array.", +} + +func (PacemakerClusterNodeStatus) SwaggerDoc() map[string]string { + return map_PacemakerClusterNodeStatus +} + +var map_PacemakerClusterResourceStatus = map[string]string{ + "": "PacemakerClusterResourceStatus represents the status of a pacemaker resource scheduled on a node. A pacemaker resource is a unit of work managed by pacemaker. In pacemaker terminology, resources are services or applications that pacemaker monitors, starts, stops, and moves between nodes to maintain high availability. For Two Node OpenShift with Fencing, we track two resources per node:\n - Kubelet (the Kubernetes node agent and a prerequisite for etcd)\n - Etcd (the distributed key-value store)\n\nFencing agents are tracked separately in the fencingAgents field because they are mapped to their target node (the node they can fence), not the node where monitoring operations are scheduled.", + "conditions": "conditions represent the observations of the resource's current state. Known condition types are: \"Healthy\", \"InService\", \"Managed\", \"Enabled\", \"Operational\", \"Active\", \"Started\", \"Schedulable\". The \"Healthy\" condition is an aggregate that tracks the overall health of the resource. The \"InService\" condition tracks whether the resource is in service (not in maintenance mode). The \"Managed\" condition tracks whether the resource is managed by pacemaker. The \"Enabled\" condition tracks whether the resource is enabled. The \"Operational\" condition tracks whether the resource is operational (not failed). The \"Active\" condition tracks whether the resource is active (available to be used). The \"Started\" condition tracks whether the resource is started. 
The \"Schedulable\" condition tracks whether the resource is schedulable (not blocked). Each of these conditions is required, so the array must contain at least 8 items.", + "name": "name is the name of the pacemaker resource. Valid values are \"Kubelet\" and \"Etcd\". The Kubelet resource is a prerequisite for etcd in Two Node OpenShift with Fencing deployments. The Etcd resource may temporarily transition to stopped during pacemaker quorum-recovery operations. Fencing agents are tracked separately in the node's fencingAgents field.", +} + +func (PacemakerClusterResourceStatus) SwaggerDoc() map[string]string { + return map_PacemakerClusterResourceStatus +} + +var map_PacemakerClusterStatus = map[string]string{ + "": "PacemakerClusterStatus contains the actual pacemaker cluster status information. As part of validating the status object, we need to ensure that the lastUpdated timestamp may not be set to an earlier timestamp than the current value. The validation rule checks if oldSelf has lastUpdated before comparing, to handle the initial status creation case.", + "conditions": "conditions represent the observations of the pacemaker cluster's current state. Known condition types are: \"Healthy\", \"InService\", \"NodeCountAsExpected\". The \"Healthy\" condition is an aggregate that tracks the overall health of the cluster. The \"InService\" condition tracks whether the cluster is in service (not in maintenance mode). The \"NodeCountAsExpected\" condition tracks whether the expected number of nodes are present. Each of these conditions is required, so the array must contain at least 3 items.", + "lastUpdated": "lastUpdated is the timestamp when this status was last updated. This is useful for identifying stale status reports. It must be a valid timestamp in RFC3339 format. 
Once set, this field cannot be removed and cannot be set to an earlier timestamp than the current value.", + "nodes": "nodes provides detailed status for each control-plane node in the Pacemaker cluster. While Pacemaker supports up to 32 nodes, the limit is set to 5 (max OpenShift control-plane nodes). For Two Node OpenShift with Fencing, exactly 2 nodes are expected in a healthy cluster. An empty list indicates a catastrophic failure where Pacemaker reports no nodes.", +} + +func (PacemakerClusterStatus) SwaggerDoc() map[string]string { + return map_PacemakerClusterStatus +} + +var map_PacemakerNodeAddress = map[string]string{ + "": "PacemakerNodeAddress contains information for a node's address. This is similar to corev1.NodeAddress but adds validation for IP addresses.", + "type": "type is the type of node address. Currently only \"InternalIP\" is supported.", + "address": "address is the node address. For InternalIP, this must be a valid global unicast IPv4 or IPv6 address in canonical form. Canonical form means the shortest standard representation (e.g., \"192.168.1.1\" not \"192.168.001.001\", or \"2001:db8::1\" not \"2001:0db8::1\"). Maximum length is 39 characters (full IPv6 address). Global unicast includes private/RFC1918 addresses but excludes loopback, link-local, and multicast.", +} + +func (PacemakerNodeAddress) SwaggerDoc() map[string]string { + return map_PacemakerNodeAddress +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/etcd/v1alpha1/types_pacemakercluster.go b/vendor/github.com/openshift/api/etcd/v1alpha1/types_pacemakercluster.go index ab06d0e390..b627413474 100644 --- a/vendor/github.com/openshift/api/etcd/v1alpha1/types_pacemakercluster.go +++ b/vendor/github.com/openshift/api/etcd/v1alpha1/types_pacemakercluster.go @@ -609,11 +609,12 @@ type PacemakerClusterNodeStatus struct { // to the node they can fence (their target), not the node where monitoring operations run. 
// Each fencing agent entry includes a unique name, fencing type, target node, and health conditions. // A node is considered fence-capable if at least one fencing agent is healthy. - // Expected to have 1 fencing agent per node, but up to 8 are supported for redundancy. + // A healthy node is expected to have at least 1 fencing agent, but the list may be empty + // when fencing agent discovery fails. // Names must be unique within this array. // +listType=map // +listMapKey=name - // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MinItems=0 // +kubebuilder:validation:MaxItems=8 // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.name == y.name))",message="fencing agent names must be unique" // +required diff --git a/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.swagger_doc_generated.go index 62e1c3ebd7..dc6f224288 100644 --- a/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/etcd/v1alpha1/zz_generated.swagger_doc_generated.go @@ -48,7 +48,7 @@ var map_PacemakerClusterNodeStatus = map[string]string{ "nodeName": "nodeName is the name of the node. This is expected to match the Kubernetes node's name, which must be a lowercase RFC 1123 subdomain consisting of lowercase alphanumeric characters, '-' or '.', starting and ending with an alphanumeric character, and be at most 253 characters in length.", "addresses": "addresses is a list of IP addresses for the node. Pacemaker allows multiple IP addresses for Corosync communication between nodes. The first address in this list is used for IP-based peer URLs for etcd membership. Each address must be a valid global unicast IPv4 or IPv6 address in canonical form (e.g., \"192.168.1.1\" not \"192.168.001.001\", or \"2001:db8::1\" not \"2001:0db8::1\"). 
This excludes loopback, link-local, and multicast addresses.", "resources": "resources contains the status of pacemaker resources scheduled on this node. Each resource entry includes the resource name and its health conditions. For Two Node OpenShift with Fencing, we track Kubelet and Etcd resources per node. Both resources are required to be present, so the array must contain at least 2 items. Valid resource names are \"Kubelet\" and \"Etcd\". Fencing agents are tracked separately in the fencingAgents field.", - "fencingAgents": "fencingAgents contains the status of fencing agents that can fence this node. Unlike resources (which are scheduled to run on this node), fencing agents are mapped to the node they can fence (their target), not the node where monitoring operations run. Each fencing agent entry includes a unique name, fencing type, target node, and health conditions. A node is considered fence-capable if at least one fencing agent is healthy. Expected to have 1 fencing agent per node, but up to 8 are supported for redundancy. Names must be unique within this array.", + "fencingAgents": "fencingAgents contains the status of fencing agents that can fence this node. Unlike resources (which are scheduled to run on this node), fencing agents are mapped to the node they can fence (their target), not the node where monitoring operations run. Each fencing agent entry includes a unique name, fencing type, target node, and health conditions. A node is considered fence-capable if at least one fencing agent is healthy. A healthy node is expected to have at least 1 fencing agent, but the list may be empty when fencing agent discovery fails. 
Names must be unique within this array.", } func (PacemakerClusterNodeStatus) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index 9142f62826..eb3235e2bb 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -9,18 +9,20 @@ | ClusterAPIComputeInstall| | | Enabled | Enabled | | | | | | ClusterAPIControlPlaneInstall| | | Enabled | Enabled | | | | | | ClusterUpdatePreflight| | | Enabled | Enabled | | | | | +| ConfidentialCluster| | | Enabled | Enabled | | | | | | Example2| | | Enabled | Enabled | | | | | | ExternalOIDCExternalClaimsSourcing| | | Enabled | Enabled | | | | | | ExternalSnapshotMetadata| | | Enabled | Enabled | | | | | -| KMSEncryptionProvider| | | Enabled | Enabled | | | | | | MachineAPIMigrationVSphere| | | Enabled | Enabled | | | | | | NetworkConnect| | | Enabled | Enabled | | | | | | NewOLMBoxCutterRuntime| | | | Enabled | | | | Enabled | | NewOLMCatalogdAPIV1Metas| | | | Enabled | | | | Enabled | | NewOLMConfigAPI| | | | Enabled | | | | Enabled | +| NewOLMOwnSingleNamespace| | | | Enabled | | | | Enabled | | NewOLMPreflightPermissionChecks| | | | Enabled | | | | Enabled | | NoRegistryClusterInstall| | | | Enabled | | | | Enabled | | ProvisioningRequestAvailable| | | Enabled | Enabled | | | | | +| VSphereMultiVCenterDay2| | | Enabled | Enabled | | | | | | AWSClusterHostedDNS| | | Enabled | Enabled | | | Enabled | Enabled | | AWSClusterHostedDNSInstall| | | Enabled | Enabled | | | Enabled | Enabled | | AWSDedicatedHosts| | | Enabled | Enabled | | | Enabled | Enabled | @@ -28,7 +30,6 @@ | AWSEuropeanSovereignCloudInstall| | | Enabled | Enabled | | | Enabled | Enabled | | AdditionalStorageConfig| | | Enabled | Enabled | | | Enabled | Enabled | | AutomatedEtcdBackup| | | Enabled | Enabled | | | Enabled | Enabled | -| AzureClusterHostedDNSInstall| | | Enabled | Enabled | | | Enabled | Enabled | | 
AzureDedicatedHosts| | | Enabled | Enabled | | | Enabled | Enabled | | AzureDualStackInstall| | | Enabled | Enabled | | | Enabled | Enabled | | AzureMultiDisk| | | Enabled | Enabled | | | Enabled | Enabled | @@ -52,18 +53,14 @@ | ConfigurablePKI| | | Enabled | Enabled | | | Enabled | Enabled | | DNSNameResolver| | | Enabled | Enabled | | | Enabled | Enabled | | DRAPartitionableDevices| | | Enabled | Enabled | | | Enabled | Enabled | -| DualReplica| | | Enabled | Enabled | | | Enabled | Enabled | | DyanmicServiceEndpointIBMCloud| | | Enabled | Enabled | | | Enabled | Enabled | | EVPN| | | Enabled | Enabled | | | Enabled | Enabled | | EtcdBackendQuota| | | Enabled | Enabled | | | Enabled | Enabled | -| EventTTL| | | Enabled | Enabled | | | Enabled | Enabled | | Example| | | Enabled | Enabled | | | Enabled | Enabled | | ExternalOIDCWithUpstreamParity| | | Enabled | Enabled | | | Enabled | Enabled | -| GCPClusterHostedDNS| | | Enabled | Enabled | | | Enabled | Enabled | | GCPCustomAPIEndpoints| | | Enabled | Enabled | | | Enabled | Enabled | | GCPCustomAPIEndpointsInstall| | | Enabled | Enabled | | | Enabled | Enabled | | GCPDualStackInstall| | | Enabled | Enabled | | | Enabled | Enabled | -| GatewayAPIWithoutOLM| | | Enabled | Enabled | | | Enabled | Enabled | | HyperShiftOnlyDynamicResourceAllocation| Enabled | | Enabled | | Enabled | | Enabled | | | ImageModeStatusReporting| | | Enabled | Enabled | | | Enabled | Enabled | | IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | | | Enabled | Enabled | @@ -78,8 +75,8 @@ | MultiDiskSetup| | | Enabled | Enabled | | | Enabled | Enabled | | MutatingAdmissionPolicy| | | Enabled | Enabled | | | Enabled | Enabled | | NewOLM| | Enabled | | Enabled | | Enabled | | Enabled | -| NewOLMOwnSingleNamespace| | Enabled | | Enabled | | Enabled | | Enabled | | NewOLMWebhookProviderOpenshiftServiceCA| | Enabled | | Enabled | | Enabled | | Enabled | +| NoOverlayMode| | | Enabled | Enabled | | | Enabled | Enabled | | 
NutanixMultiSubnets| | | Enabled | Enabled | | | Enabled | Enabled | | OSStreams| | | Enabled | Enabled | | | Enabled | Enabled | | OVNObservability| | | Enabled | Enabled | | | Enabled | Enabled | @@ -88,18 +85,18 @@ | SignatureStores| | | Enabled | Enabled | | | Enabled | Enabled | | TLSAdherence| | | Enabled | Enabled | | | Enabled | Enabled | | VSphereConfigurableMaxAllowedBlockVolumesPerNode| | | Enabled | Enabled | | | Enabled | Enabled | -| VSphereHostVMGroupZonal| | | Enabled | Enabled | | | Enabled | Enabled | | VSphereMixedNodeEnv| | | Enabled | Enabled | | | Enabled | Enabled | | VolumeGroupSnapshot| | | Enabled | Enabled | | | Enabled | Enabled | | AWSServiceLBNetworkSecurityGroup| | Enabled | Enabled | Enabled | | Enabled | Enabled | Enabled | +| AzureClusterHostedDNSInstall| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | BootImageSkewEnforcement| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | BuildCSIVolumes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| ConsolePluginContentSecurityPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| DualReplica| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| EventTTL| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ExternalOIDC| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | ExternalOIDCWithUIDAndExtraClaimMappings| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| GCPClusterHostedDNSInstall| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| HighlyAvailableArbiter| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| GatewayAPIWithoutOLM| Enabled | Enabled | Enabled | 
Enabled | Enabled | Enabled | Enabled | Enabled | | ImageStreamImportMode| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | InsightsConfig| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | InsightsOnDemandDataGather| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | @@ -114,7 +111,6 @@ | SigstoreImageVerificationPKI| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | StoragePerformantSecurityPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | UpgradeStatus| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| UserNamespacesPodSecurityStandards| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | -| UserNamespacesSupport| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| VSphereHostVMGroupZonal| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereMultiDisk| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | | VSphereMultiNetworks| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | diff --git a/vendor/github.com/openshift/api/features/features.go b/vendor/github.com/openshift/api/features/features.go index ad4faf54fa..0fc8583dfe 100644 --- a/vendor/github.com/openshift/api/features/features.go +++ b/vendor/github.com/openshift/api/features/features.go @@ -75,14 +75,6 @@ func AllFeatureSets() map[uint64]map[ClusterProfileName]map[configv1.FeatureSet] var ( allFeatureGates = map[configv1.FeatureGateName][]featureGateStatus{} - FeatureGateConsolePluginCSP = newFeatureGate("ConsolePluginContentSecurityPolicy"). - reportProblemsToJiraComponent("Management Console"). - contactPerson("jhadvig"). - productScope(ocpSpecific). - enable(inDefault(), inOKD(), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). 
- enhancementPR("https://github.com/openshift/enhancements/pull/1706"). - mustRegister() - FeatureGateServiceAccountTokenNodeBinding = newFeatureGate("ServiceAccountTokenNodeBinding"). reportProblemsToJiraComponent("apiserver-auth"). contactPerson("ibihim"). @@ -175,7 +167,7 @@ var ( contactPerson("jcpowermac"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1677"). - enable(inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). + enable(inDefault(), inOKD(), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). mustRegister() FeatureGateVSphereMultiDisk = newFeatureGate("VSphereMultiDisk"). @@ -202,6 +194,14 @@ var ( enable(inDevPreviewNoUpgrade()). mustRegister() + FeatureGateNoOverlayMode = newFeatureGate("NoOverlayMode"). + reportProblemsToJiraComponent("Networking/ovn-kubernetes"). + contactPerson("pliurh"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1859"). + enable(inDevPreviewNoUpgrade(), inTechPreviewNoUpgrade()). + mustRegister() + FeatureGateEVPN = newFeatureGate("EVPN"). reportProblemsToJiraComponent("Networking/ovn-kubernetes"). contactPerson("jcaamano"). @@ -264,14 +264,6 @@ var ( enhancementPR(legacyFeatureGateWithoutEnhancement). mustRegister() - FeatureGateGCPClusterHostedDNS = newFeatureGate("GCPClusterHostedDNS"). - reportProblemsToJiraComponent("Installer"). - contactPerson("barbacbd"). - productScope(ocpSpecific). - enhancementPR(legacyFeatureGateWithoutEnhancement). - enable(inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). - mustRegister() - FeatureGateAWSClusterHostedDNS = newFeatureGate("AWSClusterHostedDNS"). reportProblemsToJiraComponent("Installer"). contactPerson("barbacbd"). @@ -285,7 +277,7 @@ var ( contactPerson("sadasu"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1468"). - enable(inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). 
+ enable(inDefault(), inOKD(), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). mustRegister() FeatureGateMixedCPUsAllocation = newFeatureGate("MixedCPUsAllocation"). @@ -445,7 +437,7 @@ var ( contactPerson("nschieder"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1849"). - enable(inClusterProfile(SelfManaged), inDefault(), inOKD(), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). + enable(inClusterProfile(SelfManaged), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). mustRegister() FeatureGateNewOLMWebhookProviderOpenshiftServiceCA = newFeatureGate("NewOLMWebhookProviderOpenshiftServiceCA"). @@ -622,24 +614,6 @@ var ( enable(inDefault(), inOKD(), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). mustRegister() - FeatureGateUserNamespacesSupport = newFeatureGate("UserNamespacesSupport"). - reportProblemsToJiraComponent("Node"). - contactPerson("haircommander"). - productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/127"). - enable(inDefault(), inOKD(), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). - mustRegister() - - // Note: this feature is perma-alpha, but it is safe and desireable to enable. - // It was an oversight in upstream to not remove the feature gate after the version skew became safe in 1.33. - // See https://github.com/kubernetes/enhancements/tree/d4226c42/keps/sig-node/127-user-namespaces#pod-security-standards-pss-integration - FeatureGateUserNamespacesPodSecurityStandards = newFeatureGate("UserNamespacesPodSecurityStandards"). - reportProblemsToJiraComponent("Node"). - contactPerson("haircommander"). - productScope(kubernetes). - enhancementPR("https://github.com/kubernetes/enhancements/issues/127"). - enable(inDefault(), inOKD(), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). - mustRegister() FeatureGateVSphereMultiNetworks = newFeatureGate("VSphereMultiNetworks"). reportProblemsToJiraComponent("SPLAT"). 
@@ -673,14 +647,6 @@ var ( enable(inDevPreviewNoUpgrade(), inTechPreviewNoUpgrade()). mustRegister() - FeatureGateKMSEncryptionProvider = newFeatureGate("KMSEncryptionProvider"). - reportProblemsToJiraComponent("kube-apiserver"). - contactPerson("swghosh"). - productScope(ocpSpecific). - enhancementPR("https://github.com/openshift/enhancements/pull/1682"). - enable(inDevPreviewNoUpgrade()). - mustRegister() - FeatureGateKMSEncryption = newFeatureGate("KMSEncryption"). reportProblemsToJiraComponent("kube-apiserver"). contactPerson("ardaguclu"). @@ -689,14 +655,6 @@ var ( enable(inDevPreviewNoUpgrade(), inTechPreviewNoUpgrade()). mustRegister() - FeatureGateHighlyAvailableArbiter = newFeatureGate("HighlyAvailableArbiter"). - reportProblemsToJiraComponent("Two Node with Arbiter"). - contactPerson("eggfoobar"). - productScope(ocpSpecific). - enhancementPR("https://github.com/openshift/enhancements/pull/1674"). - enable(inDefault(), inOKD(), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). - mustRegister() - FeatureGateCVOConfiguration = newFeatureGate("ClusterVersionOperatorConfiguration"). reportProblemsToJiraComponent("Cluster Version Operator"). contactPerson("dhurta"). @@ -750,7 +708,7 @@ var ( contactPerson("jaypoulz"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1675"). - enable(inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). + enable(inDefault(), inOKD(), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). mustRegister() FeatureShortCertRotation = newFeatureGate("ShortCertRotation"). @@ -808,6 +766,14 @@ var ( enable(inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). mustRegister() + FeatureGateVSphereMultiVCenterDay2 = newFeatureGate("VSphereMultiVCenterDay2"). + reportProblemsToJiraComponent("splat"). + contactPerson("vr4manta"). + productScope(ocpSpecific). + enhancementPR("https://github.com/openshift/enhancements/pull/1961"). + enable(inDevPreviewNoUpgrade()). 
+ mustRegister() + FeatureGateAWSServiceLBNetworkSecurityGroup = newFeatureGate("AWSServiceLBNetworkSecurityGroup"). reportProblemsToJiraComponent("Cloud Compute / Cloud Controller Manager"). contactPerson("mtulio"). @@ -825,14 +791,6 @@ var ( enable(inClusterProfile(SelfManaged), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). mustRegister() - FeatureGateGCPClusterHostedDNSInstall = newFeatureGate("GCPClusterHostedDNSInstall"). - reportProblemsToJiraComponent("Installer"). - contactPerson("barbacbd"). - productScope(ocpSpecific). - enhancementPR("https://github.com/openshift/enhancements/pull/1468"). - enable(inDefault(), inOKD(), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). - mustRegister() - FeatureGateAWSClusterHostedDNSInstall = newFeatureGate("AWSClusterHostedDNSInstall"). reportProblemsToJiraComponent("Installer"). contactPerson("barbacbd"). @@ -908,7 +866,7 @@ var ( contactPerson("tjungblu"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1857"). - enable(inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). + enable(inDefault(), inOKD(), inTechPreviewNoUpgrade(), inDevPreviewNoUpgrade()). mustRegister() FeatureGateMutableCSINodeAllocatableCount = newFeatureGate("MutableCSINodeAllocatableCount"). @@ -1002,7 +960,7 @@ var ( contactPerson("miciah"). productScope(ocpSpecific). enhancementPR("https://github.com/openshift/enhancements/pull/1933"). - enable(inDevPreviewNoUpgrade(), inTechPreviewNoUpgrade()). + enable(inDefault(), inOKD(), inDevPreviewNoUpgrade(), inTechPreviewNoUpgrade()). mustRegister() FeatureGateTLSAdherence = newFeatureGate("TLSAdherence"). @@ -1012,4 +970,12 @@ var ( enhancementPR("https://github.com/openshift/enhancements/pull/1910"). enable(inDevPreviewNoUpgrade(), inTechPreviewNoUpgrade()). mustRegister() + + FeatureGateConfidentialCluster = newFeatureGate("ConfidentialCluster"). + reportProblemsToJiraComponent("ConfidentialClusters"). + contactPerson("fjin"). + productScope(ocpSpecific). 
+ enhancementPR("https://github.com/openshift/enhancements/pull/1962"). + enable(inDevPreviewNoUpgrade()). + mustRegister() ) diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go index be5476344b..cbbe0b337c 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go @@ -129,6 +129,14 @@ type MachineSetStatus struct { // observedGeneration reflects the generation of the most recently observed MachineSet. // +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. + // It is exposed via the scale subresource as status.selector. + // When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. + // When present, it must not be empty and must not exceed 4096 characters. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=4096 + // +optional + LabelSelector string `json:"labelSelector,omitempty"` // In the event that there is a terminal problem reconciling the // replicas, both ErrorReason and ErrorMessage will be set. 
ErrorReason // will be populated with a succinct value suitable for machine diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-CustomNoUpgrade.crd.yaml index 7c68b7b189..06f2ea472a 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-CustomNoUpgrade.crd.yaml @@ -660,6 +660,15 @@ spec: labels of the machine template of the MachineSet. format: int32 type: integer + labelSelector: + description: |- + labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. + It is exposed via the scale subresource as status.selector. + When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. + When present, it must not be empty and must not exceed 4096 characters. + maxLength: 4096 + minLength: 1 + type: string observedGeneration: description: observedGeneration reflects the generation of the most recently observed MachineSet. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-Default.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-Default.crd.yaml index 624d477021..fb47ff7c99 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-Default.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-Default.crd.yaml @@ -615,6 +615,15 @@ spec: labels of the machine template of the MachineSet. 
format: int32 type: integer + labelSelector: + description: |- + labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. + It is exposed via the scale subresource as status.selector. + When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. + When present, it must not be empty and must not exceed 4096 characters. + maxLength: 4096 + minLength: 1 + type: string observedGeneration: description: observedGeneration reflects the generation of the most recently observed MachineSet. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-DevPreviewNoUpgrade.crd.yaml index 37fb42ca91..6b32059d0f 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-DevPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-DevPreviewNoUpgrade.crd.yaml @@ -660,6 +660,15 @@ spec: labels of the machine template of the MachineSet. format: int32 type: integer + labelSelector: + description: |- + labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. + It is exposed via the scale subresource as status.selector. + When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. + When present, it must not be empty and must not exceed 4096 characters. + maxLength: 4096 + minLength: 1 + type: string observedGeneration: description: observedGeneration reflects the generation of the most recently observed MachineSet. 
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-OKD.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-OKD.crd.yaml index 7cd0341da0..0cb72ea4b7 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-OKD.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-OKD.crd.yaml @@ -615,6 +615,15 @@ spec: labels of the machine template of the MachineSet. format: int32 type: integer + labelSelector: + description: |- + labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. + It is exposed via the scale subresource as status.selector. + When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. + When present, it must not be empty and must not exceed 4096 characters. + maxLength: 4096 + minLength: 1 + type: string observedGeneration: description: observedGeneration reflects the generation of the most recently observed MachineSet. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-TechPreviewNoUpgrade.crd.yaml index d93d2c15f8..71e0196782 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.crd-manifests/0000_10_machine-api_01_machinesets-TechPreviewNoUpgrade.crd.yaml @@ -660,6 +660,15 @@ spec: labels of the machine template of the MachineSet. 
format: int32 type: integer + labelSelector: + description: |- + labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. + It is exposed via the scale subresource as status.selector. + When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. + When present, it must not be empty and must not exceed 4096 characters. + maxLength: 4096 + minLength: 1 + type: string observedGeneration: description: observedGeneration reflects the generation of the most recently observed MachineSet. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index 2c4a9030cc..e686cad25a 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -748,6 +748,7 @@ var map_MachineSetStatus = map[string]string{ "readyReplicas": "The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is \"Ready\".", "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this MachineSet.", "observedGeneration": "observedGeneration reflects the generation of the most recently observed MachineSet.", + "labelSelector": "labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. It is exposed via the scale subresource as status.selector. When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. When present, it must not be empty and must not exceed 4096 characters.", "errorReason": "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. 
ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption.\n\nThese fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output.", "conditions": "conditions defines the current state of the MachineSet", "authoritativeAPI": "authoritativeAPI is the API that is authoritative for this resource. Valid values are MachineAPI, ClusterAPI and Migrating. This value is updated by the migration controller to reflect the authoritative API. Machine API and Cluster API controllers use this value to determine whether or not to reconcile the resource. 
When set to Migrating, the migration controller is currently performing the handover of authority from one API to the other.", diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go index 53c71aabb6..52f5db78d5 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -163,7 +163,15 @@ type AWSCSIDriverConfigSpec struct { // kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, // rather than the default KMS key used by AWS. // The value may be either the ARN or Alias ARN of a KMS key. - // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b|aws-iso-e|aws-iso-f):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$` + // + // The ARN must follow the format: arn::kms:::(key|alias)/, where: + // is the AWS partition (aws, aws-cn, aws-us-gov, aws-iso, aws-iso-b, aws-iso-e, aws-iso-f, or aws-eusc), + // is the AWS region, + // is a 12-digit numeric identifier for the AWS account, + // is the KMS key ID or alias name. 
+ // + // +openshift:validation:FeatureGateAwareXValidation:featureGate="",rule=`matches(self, '^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b|aws-iso-e|aws-iso-f):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)/.*$')`,message=`kmsKeyARN must be a valid AWS KMS key ARN in the format: arn::kms:::(key|alias)/` + // +openshift:validation:FeatureGateAwareXValidation:featureGate=AWSEuropeanSovereignCloudInstall,rule=`matches(self, '^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b|aws-iso-e|aws-iso-f|aws-eusc):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)/.*$')`,message=`kmsKeyARN must be a valid AWS KMS key ARN in the format: arn::kms:::(key|alias)/` // +optional KMSKeyARN string `json:"kmsKeyARN,omitempty"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index d54352f2ce..0c5cf919e1 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -2068,8 +2068,51 @@ type IngressControllerTuningOptions struct { // +kubebuilder:validation:Type:=string // +optional ReloadInterval metav1.Duration `json:"reloadInterval,omitempty"` + + // configurationManagement specifies how OpenShift router should update + // the HAProxy configuration. The following values are valid for this + // field: + // + // * "ForkAndReload". + // * "Dynamic". + // + // Omitting this field means that the user has no opinion and the + // platform may choose a reasonable default. This default is subject to + // change over time. The current default is "ForkAndReload". + // + // "ForkAndReload" means that OpenShift router should rewrite the + // HAProxy configuration file and instruct HAProxy to fork and reload. + // This is OpenShift router's traditional approach. + // + // "Dynamic" means that OpenShift router may use HAProxy's control + // socket for some configuration updates and fall back to fork and + // reload for other configuration updates. 
This is a newer approach, + // which may be less mature than ForkAndReload. This setting can + // improve load-balancing fairness and metrics accuracy and reduce CPU + // and memory usage if HAProxy has frequent configuration updates for + // route and endpoints updates. + // + // Note: The "Dynamic" option is currently experimental and should not + // be enabled on production clusters. + // + // +openshift:enable:FeatureGate=IngressControllerDynamicConfigurationManager + // +optional + ConfigurationManagement IngressControllerConfigurationManagement `json:"configurationManagement,omitempty"` } +// IngressControllerConfigurationManagement specifies whether always to use +// fork-and-reload to update the HAProxy configuration or whether to use +// HAProxy's control socket for some configuration updates. +// +// +enum +// +kubebuilder:validation:Enum=Dynamic;ForkAndReload +type IngressControllerConfigurationManagement string + +const ( + IngressControllerConfigurationManagementDynamic IngressControllerConfigurationManagement = "Dynamic" + IngressControllerConfigurationManagementForkAndReload IngressControllerConfigurationManagement = "ForkAndReload" +) + // HTTPEmptyRequestsPolicy indicates how HTTP connections for which no request // is received should be handled. // +kubebuilder:validation:Enum=Respond;Ignore diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index 1cf56f549b..cd2e2f9e38 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -398,6 +398,12 @@ type OpenShiftSDNConfig struct { // ovnKubernetesConfig contains the configuration parameters for networks // using the ovn-kubernetes network project +// +openshift:validation:FeatureGateAwareXValidation:featureGate=NoOverlayMode,rule="self.?transport.orValue('') == 'NoOverlay' ? 
self.?routeAdvertisements.orValue('') == 'Enabled' : true",message="routeAdvertisements must be Enabled when transport is NoOverlay" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=NoOverlayMode,rule="self.?transport.orValue('') == 'NoOverlay' ? has(self.noOverlayConfig) : !has(self.noOverlayConfig)",message="noOverlayConfig must be set if transport is NoOverlay, and is forbidden otherwise" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=NoOverlayMode,rule="self.?noOverlayConfig.routing.orValue('') == 'Managed' ? has(self.bgpManagedConfig) : true",message="bgpManagedConfig is required when noOverlayConfig.routing is Managed" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=NoOverlayMode,rule="!has(self.transport) || self.transport == 'Geneve' || has(oldSelf.transport)",message="transport can only be set to Geneve after installation" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=NoOverlayMode,rule="!has(oldSelf.transport) || has(self.transport)",message="transport may not be removed once set" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=NoOverlayMode,rule="!has(oldSelf.noOverlayConfig) || has(self.noOverlayConfig)",message="noOverlayConfig may not be removed once set" type OVNKubernetesConfig struct { // mtu is the MTU to use for the tunnel interface. This must be 100 // bytes smaller than the uplink mtu. @@ -466,6 +472,38 @@ type OVNKubernetesConfig struct { // current default is "Disabled". // +optional RouteAdvertisements RouteAdvertisementsEnablement `json:"routeAdvertisements,omitempty"` + + // transport sets the transport mode for pods on the default network. + // Allowed values are "NoOverlay" and "Geneve". + // "NoOverlay" avoids tunnel encapsulation, routing pod traffic directly between nodes. + // "Geneve" encapsulates pod traffic using Geneve tunnels between nodes. 
+ // When omitted, this means the user has no opinion and the platform chooses + // a reasonable default which is subject to change over time. + // The current default is "Geneve". + // "NoOverlay" can only be set at installation time and cannot be changed afterwards. + // "Geneve" may be set explicitly at any time to lock in the current default. + // +openshift:enable:FeatureGate=NoOverlayMode + // +kubebuilder:validation:Enum=NoOverlay;Geneve + // +openshift:validation:FeatureGateAwareXValidation:featureGate=NoOverlayMode,rule="self == oldSelf",message="transport is immutable once set" + // +optional + Transport TransportOption `json:"transport,omitempty"` + + // noOverlayConfig contains configuration for no-overlay mode. + // This configuration applies to the default network only. + // It is required when transport is "NoOverlay". + // When omitted, this means the user does not configure no-overlay mode options. + // +openshift:enable:FeatureGate=NoOverlayMode + // +optional + NoOverlayConfig NoOverlayConfig `json:"noOverlayConfig,omitzero,omitempty"` + + // bgpManagedConfig configures the BGP properties for networks (default network or CUDNs) + // in no-overlay mode that specify routing="Managed" in their noOverlayConfig. + // It is required when noOverlayConfig.routing is set to "Managed". + // When omitted, this means the user does not configure BGP for managed routing. + // This field can be set at installation time or on day 2, and can be modified at any time. 
+ // +openshift:enable:FeatureGate=NoOverlayMode + // +optional + BGPManagedConfig BGPManagedConfig `json:"bgpManagedConfig,omitzero,omitempty"` } type IPv4OVNKubernetesConfig struct { @@ -896,3 +934,80 @@ type AdditionalRoutingCapabilities struct { // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))" Providers []RoutingCapabilitiesProvider `json:"providers"` } + +// TransportOption is the type for network transport options +type TransportOption string + +// SNATOption is the type for SNAT configuration options +type SNATOption string + +// RoutingOption is the type for routing configuration options +type RoutingOption string + +// BGPTopology is the type for BGP topology configuration +type BGPTopology string + +const ( + // TransportOptionNoOverlay indicates the network operates in no-overlay mode + TransportOptionNoOverlay TransportOption = "NoOverlay" + // TransportOptionGeneve indicates the network uses Geneve overlay + TransportOptionGeneve TransportOption = "Geneve" + + // SNATEnabled indicates outbound SNAT is enabled + SNATEnabled SNATOption = "Enabled" + // SNATDisabled indicates outbound SNAT is disabled + SNATDisabled SNATOption = "Disabled" + + // RoutingManaged indicates routing is managed by OVN-Kubernetes + RoutingManaged RoutingOption = "Managed" + // RoutingUnmanaged indicates routing is managed by users + RoutingUnmanaged RoutingOption = "Unmanaged" + + // BGPTopologyFullMesh indicates a full mesh BGP topology where every node peers directly with every other node + BGPTopologyFullMesh BGPTopology = "FullMesh" +) + +// NoOverlayConfig contains configuration options for networks operating in no-overlay mode. +type NoOverlayConfig struct { + // outboundSNAT defines the SNAT behavior for outbound traffic from pods. + // Allowed values are "Enabled" and "Disabled". + // When set to "Enabled", SNAT is performed on outbound traffic from pods. 
+ // When set to "Disabled", SNAT is not performed and pod IPs are preserved in outbound traffic. + // This field is required when the network operates in no-overlay mode. + // This field can be set to any value at installation time and can be changed afterwards. + // +kubebuilder:validation:Enum=Enabled;Disabled + // +required + OutboundSNAT SNATOption `json:"outboundSNAT,omitempty"` + + // routing specifies whether the pod network routing is managed by OVN-Kubernetes or users. + // Allowed values are "Managed" and "Unmanaged". + // When set to "Managed", OVN-Kubernetes manages the pod network routing configuration through BGP. + // When set to "Unmanaged", users are responsible for configuring the pod network routing. + // This field is required when the network operates in no-overlay mode. + // This field is immutable once set. + // +kubebuilder:validation:Enum=Managed;Unmanaged + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="routing is immutable once set" + // +required + Routing RoutingOption `json:"routing,omitempty"` +} + +// BGPManagedConfig contains configuration options for BGP when routing is "Managed". +type BGPManagedConfig struct { + // asNumber is the 2-byte or 4-byte Autonomous System Number (ASN) + // to be used in the generated FRR configuration. + // Valid values are 1 to 4294967295. + // When omitted, this defaults to 64512. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=4294967295 + // +default=64512 + // +optional + ASNumber int64 `json:"asNumber,omitempty"` + + // bgpTopology defines the BGP topology to be used. + // Allowed values are "FullMesh". + // When set to "FullMesh", every node peers directly with every other node via BGP. + // This field is required when BGPManagedConfig is specified. 
+ // +kubebuilder:validation:Enum=FullMesh + // +required + BGPTopology BGPTopology `json:"bgpTopology,omitempty"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index 3bc6b81de4..3d3c8f4f82 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -390,6 +390,22 @@ func (in *AzureDiskEncryptionSet) DeepCopy() *AzureDiskEncryptionSet { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPManagedConfig) DeepCopyInto(out *BGPManagedConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPManagedConfig. +func (in *BGPManagedConfig) DeepCopy() *BGPManagedConfig { + if in == nil { + return nil + } + out := new(BGPManagedConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BootImageSkewEnforcementConfig) DeepCopyInto(out *BootImageSkewEnforcementConfig) { *out = *in @@ -3665,6 +3681,22 @@ func (in *NetworkStatus) DeepCopy() *NetworkStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NoOverlayConfig) DeepCopyInto(out *NoOverlayConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NoOverlayConfig. +func (in *NoOverlayConfig) DeepCopy() *NoOverlayConfig { + if in == nil { + return nil + } + out := new(NoOverlayConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NodeDisruptionPolicyClusterStatus) DeepCopyInto(out *NodeDisruptionPolicyClusterStatus) { *out = *in @@ -4158,6 +4190,8 @@ func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { *out = new(IPv6OVNKubernetesConfig) **out = **in } + out.NoOverlayConfig = in.NoOverlayConfig + out.BGPManagedConfig = in.BGPManagedConfig return } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml index 1c552b0c0e..aaf0972908 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -69,6 +69,7 @@ clustercsidrivers.operator.openshift.io: Capability: "" Category: "" FeatureGates: + - AWSEuropeanSovereignCloudInstall - VSphereConfigurableMaxAllowedBlockVolumesPerNode FilenameOperatorName: csi-driver FilenameOperatorOrdering: "01" @@ -175,7 +176,8 @@ ingresscontrollers.operator.openshift.io: CRDName: ingresscontrollers.operator.openshift.io Capability: Ingress Category: "" - FeatureGates: [] + FeatureGates: + - IngressControllerDynamicConfigurationManager FilenameOperatorName: ingress FilenameOperatorOrdering: "00" FilenameRunLevel: "0000_50" @@ -326,7 +328,8 @@ networks.operator.openshift.io: CRDName: networks.operator.openshift.io Capability: "" Category: "" - FeatureGates: [] + FeatureGates: + - NoOverlayMode FilenameOperatorName: network FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_70" diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 64aac26eb3..c3ed726028 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ 
-466,7 +466,7 @@ func (Theme) SwaggerDoc() map[string]string { var map_AWSCSIDriverConfigSpec = map[string]string{ "": "AWSCSIDriverConfigSpec defines properties that can be configured for the AWS CSI driver.", - "kmsKeyARN": "kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. The value may be either the ARN or Alias ARN of a KMS key.", + "kmsKeyARN": "kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. The value may be either the ARN or Alias ARN of a KMS key.\n\nThe ARN must follow the format: arn:<partition>:kms:<region>:<account_id>:(key|alias)/<key_id_or_alias>, where: <partition> is the AWS partition (aws, aws-cn, aws-us-gov, aws-iso, aws-iso-b, aws-iso-e, aws-iso-f, or aws-eusc), <region> is the AWS region, <account_id> is a 12-digit numeric identifier for the AWS account, <key_id_or_alias> is the KMS key ID or alias name.", "efsVolumeMetrics": "efsVolumeMetrics sets the configuration for collecting metrics from EFS volumes used by the EFS CSI Driver.", } @@ -1121,6 +1121,7 @@ var map_IngressControllerTuningOptions = map[string]string{ "healthCheckInterval": "healthCheckInterval defines how long the router waits between two consecutive health checks on its configured backends. This value is applied globally as a default for all routes, but may be overridden per-route by the route annotation \"router.openshift.io/haproxy.health.check.interval\".\n\nExpects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nSetting this to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms.
Alternatively, setting this too high can result in increased latency, due to backend servers that are no longer available, but haven't yet been detected as such.\n\nAn empty or zero healthCheckInterval means no opinion and IngressController chooses a default, which is subject to change over time. Currently the default healthCheckInterval value is 5s.\n\nCurrently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms (24.85 days). Both are subject to change over time.", "maxConnections": "maxConnections defines the maximum number of simultaneous connections that can be established per HAProxy process. Increasing this value allows each ingress controller pod to handle more connections but at the cost of additional system resources being consumed.\n\nPermitted values are: empty, 0, -1, and the range 2000-2000000.\n\nIf this field is empty or 0, the IngressController will use the default value of 50000, but the default is subject to change in future releases.\n\nIf the value is -1 then HAProxy will dynamically compute a maximum value based on the available ulimits in the running container. Selecting -1 (i.e., auto) will result in a large value being computed (~520000 on OpenShift >=4.10 clusters) and therefore each HAProxy process will incur significant memory usage compared to the current default of 50000.\n\nSetting a value that is greater than the current operating system limit will prevent the HAProxy process from starting.\n\nIf you choose a discrete value (e.g., 750000) and the router pod is migrated to a new node, there's no guarantee that that new node has identical ulimits configured. In such a scenario the pod would fail to start. 
If you have nodes with different ulimits configured (e.g., different tuned profiles) and you choose a discrete value then the guidance is to use -1 and let the value be computed dynamically at runtime.\n\nYou can monitor memory usage for router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}'.\n\nYou can monitor memory usage of individual HAProxy processes in router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}/container_processes{container=\"router\",namespace=\"openshift-ingress\"}'.", "reloadInterval": "reloadInterval defines the minimum interval at which the router is allowed to reload to accept new changes. Increasing this value can prevent the accumulation of HAProxy processes, depending on the scenario. Increasing this interval can also lessen load imbalance on a backend's servers when using the roundrobin balancing algorithm. Alternatively, decreasing this value may decrease latency since updates to HAProxy's configuration can take effect more quickly.\n\nThe value must be a time duration value; see <https://pkg.go.dev/time#Duration>. Currently, the minimum value allowed is 1s, and the maximum allowed value is 120s. Minimum and maximum allowed values may change in future versions of OpenShift. Note that if a duration outside of these bounds is provided, the value of reloadInterval will be capped/floored and not rejected (e.g. a duration of over 120s will be capped to 120s; the IngressController will not reject and replace this disallowed value with the default).\n\nA zero value for reloadInterval tells the IngressController to choose the default, which is currently 5s and subject to change without notice.\n\nThis field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\".
Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\".\n\nNote: Setting a value significantly larger than the default of 5s can cause latency in observing updates to routes and their endpoints. HAProxy's configuration will be reloaded less frequently, and newly created routes will not be served until the subsequent reload.", + "configurationManagement": "configurationManagement specifies how OpenShift router should update the HAProxy configuration. The following values are valid for this field:\n\n* \"ForkAndReload\". * \"Dynamic\".\n\nOmitting this field means that the user has no opinion and the platform may choose a reasonable default. This default is subject to change over time. The current default is \"ForkAndReload\".\n\n\"ForkAndReload\" means that OpenShift router should rewrite the HAProxy configuration file and instruct HAProxy to fork and reload. This is OpenShift router's traditional approach.\n\n\"Dynamic\" means that OpenShift router may use HAProxy's control socket for some configuration updates and fall back to fork and reload for other configuration updates. This is a newer approach, which may be less mature than ForkAndReload. This setting can improve load-balancing fairness and metrics accuracy and reduce CPU and memory usage if HAProxy has frequent configuration updates for route and endpoints updates.\n\nNote: The \"Dynamic\" option is currently experimental and should not be enabled on production clusters.", } func (IngressControllerTuningOptions) SwaggerDoc() map[string]string { @@ -1669,6 +1670,16 @@ func (AdditionalRoutingCapabilities) SwaggerDoc() map[string]string { return map_AdditionalRoutingCapabilities } +var map_BGPManagedConfig = map[string]string{ + "": "BGPManagedConfig contains configuration options for BGP when routing is \"Managed\".", + "asNumber": "asNumber is the 2-byte or 4-byte Autonomous System Number (ASN) to be used in the generated FRR configuration. 
Valid values are 1 to 4294967295. When omitted, this defaults to 64512.", + "bgpTopology": "bgpTopology defines the BGP topology to be used. Allowed values are \"FullMesh\". When set to \"FullMesh\", every node peers directly with every other node via BGP. This field is required when BGPManagedConfig is specified.", +} + +func (BGPManagedConfig) SwaggerDoc() map[string]string { + return map_BGPManagedConfig +} + var map_ClusterNetworkEntry = map[string]string{ "": "ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks", } @@ -1896,6 +1907,16 @@ func (NetworkStatus) SwaggerDoc() map[string]string { return map_NetworkStatus } +var map_NoOverlayConfig = map[string]string{ + "": "NoOverlayConfig contains configuration options for networks operating in no-overlay mode.", + "outboundSNAT": "outboundSNAT defines the SNAT behavior for outbound traffic from pods. Allowed values are \"Enabled\" and \"Disabled\". When set to \"Enabled\", SNAT is performed on outbound traffic from pods. When set to \"Disabled\", SNAT is not performed and pod IPs are preserved in outbound traffic. This field is required when the network operates in no-overlay mode. This field can be set to any value at installation time and can be changed afterwards.", + "routing": "routing specifies whether the pod network routing is managed by OVN-Kubernetes or users. Allowed values are \"Managed\" and \"Unmanaged\". When set to \"Managed\", OVN-Kubernetes manages the pod network routing configuration through BGP. When set to \"Unmanaged\", users are responsible for configuring the pod network routing. This field is required when the network operates in no-overlay mode. 
This field is immutable once set.", +} + +func (NoOverlayConfig) SwaggerDoc() map[string]string { + return map_NoOverlayConfig +} + var map_OVNKubernetesConfig = map[string]string{ "": "ovnKubernetesConfig contains the configuration parameters for networks using the ovn-kubernetes network project", "mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400", @@ -1910,6 +1931,9 @@ var map_OVNKubernetesConfig = map[string]string{ "ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. When ommitted, this means no opinions and the default configuration is used. Check individual fields within ipv4 for details of default values.", "ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. When ommitted, this means no opinions and the default configuration is used. Check individual fields within ipv4 for details of default values.", "routeAdvertisements": "routeAdvertisements determines if the functionality to advertise cluster network routes through a dynamic routing protocol, such as BGP, is enabled or not. This functionality is configured through the ovn-kubernetes RouteAdvertisements CRD. Requires the 'FRR' routing capability provider to be enabled as an additional routing capability. Allowed values are \"Enabled\", \"Disabled\" and ommited. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default is \"Disabled\".", + "transport": "transport sets the transport mode for pods on the default network. Allowed values are \"NoOverlay\" and \"Geneve\". \"NoOverlay\" avoids tunnel encapsulation, routing pod traffic directly between nodes. \"Geneve\" encapsulates pod traffic using Geneve tunnels between nodes. When omitted, this means the user has no opinion and the platform chooses a reasonable default which is subject to change over time. 
The current default is \"Geneve\". \"NoOverlay\" can only be set at installation time and cannot be changed afterwards. \"Geneve\" may be set explicitly at any time to lock in the current default.", + "noOverlayConfig": "noOverlayConfig contains configuration for no-overlay mode. This configuration applies to the default network only. It is required when transport is \"NoOverlay\". When omitted, this means the user does not configure no-overlay mode options.", + "bgpManagedConfig": "bgpManagedConfig configures the BGP properties for networks (default network or CUDNs) in no-overlay mode that specify routing=\"Managed\" in their noOverlayConfig. It is required when noOverlayConfig.routing is set to \"Managed\". When omitted, this means the user does not configure BGP for managed routing. This field can be set at installation time or on day 2, and can be modified at any time.", } func (OVNKubernetesConfig) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterapi.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterapi.go index 5816f93628..719893f8a5 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterapi.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_clusterapi.go @@ -20,6 +20,7 @@ import ( // Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. // +openshift:compatibility-gen:level=4 // +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="clusterapi is a singleton, .metadata.name must be 'cluster'" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.status) || has(self.status)",message="status may not be removed once set" type ClusterAPI struct { metav1.TypeMeta `json:",inline"` @@ -78,6 +79,7 @@ type RevisionName string // ClusterAPIStatus describes the current state of the capi-operator. 
// +kubebuilder:validation:XValidation:rule="self.revisions.exists(r, r.name == self.desiredRevision && self.revisions.all(s, s.revision <= r.revision))",message="desiredRevision must be the name of the revision with the highest revision number" // +kubebuilder:validation:XValidation:rule="!has(self.currentRevision) || self.revisions.exists(r, r.name == self.currentRevision)",message="currentRevision must correspond to an entry in the revisions list" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.observedRevisionGeneration) || has(self.observedRevisionGeneration)",message="observedRevisionGeneration may not be unset once set" type ClusterAPIStatus struct { // currentRevision is the name of the most recently fully applied revision. // It is written by the installer controller. If it is absent, it indicates @@ -111,6 +113,15 @@ type ClusterAPIStatus struct { // +kubebuilder:validation:XValidation:rule="self.all(new, oldSelf.exists(old, old.name == new.name) || oldSelf.all(old, new.revision > old.revision))",message="new revisions must have a revision number greater than all existing revisions" // +kubebuilder:validation:XValidation:rule="oldSelf.all(old, !self.exists(new, new.name == old.name) || self.exists(new, new == old))",message="existing revisions are immutable, but may be removed" Revisions []ClusterAPIInstallerRevision `json:"revisions,omitempty"` + + // observedRevisionGeneration is the generation of the ClusterAPI object that was last observed by the revision controller. + // If specified it must be greater than or equal to 1, and less than 2^53. It may not decrease or be unset once set. 
+ // + // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=9007199254740991 + // +kubebuilder:validation:XValidation:rule="self >= oldSelf",message="observedRevisionGeneration may not decrease" + ObservedRevisionGeneration int64 `json:"observedRevisionGeneration,omitempty"` } // +structType=atomic @@ -144,6 +155,17 @@ type ClusterAPIInstallerRevision struct { // +optional UnmanagedCustomResourceDefinitions []string `json:"unmanagedCustomResourceDefinitions,omitempty"` + // manifestSubstitutions is a list of envsubst style substitutions which + // will be applied to manifests in the revision during rendering. If + // defined it must not be empty, and may not contain more than 32 items. + // Each manifest substitution must have a unique key. + // +optional + // +listType=map + // +listMapKey=key + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=32 + ManifestSubstitutions []ClusterAPIInstallerRevisionManifestSubstitution `json:"manifestSubstitutions,omitempty"` + // components is a list of components which will be installed by this // revision. Components will be installed in the order they are listed. If // omitted no components will be installed. @@ -157,6 +179,29 @@ type ClusterAPIInstallerRevision struct { Components []ClusterAPIInstallerComponent `json:"components,omitempty"` } +// ClusterAPIInstallerRevisionManifestSubstitution defines an envsubst style +// substitution which will be applied to manifests in a revision during +// rendering. +type ClusterAPIInstallerRevisionManifestSubstitution struct { + // key is the name of the envsubst variable to substitute. It must be a + // valid envsubst variable name, consisting of letters, digits, and + // underscores, and must start with a letter or underscore. The key must + // not be empty, and must not exceed 255 characters. 
+ // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + // +kubebuilder:validation:XValidation:rule="self.matches('^[A-Za-z_][A-Za-z0-9_]*$')",message="key must start with a letter or underscore, followed by letters, digits, or underscores" + Key string `json:"key,omitempty"` + + // value is the value to substitute for the envsubst variable. It may be + // empty, in which case the variable will be substituted with an empty + // string. The value must not exceed 4096 characters. + // +required + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=4096 + Value *string `json:"value,omitempty"` +} + // InstallerComponentType is the type of component to install. // +kubebuilder:validation:Enum=Image // +enum @@ -168,9 +213,24 @@ const ( ) // ClusterAPIInstallerComponent defines a component which will be installed by this revision. +type ClusterAPIInstallerComponent struct { + // name is the human-readable name of the component. The value has no + // effect, and will not be set if the component does not define a name in + // its manifests. If set it must consist of alphanumeric characters, or + // '-', and may not exceed 255 characters. + // +optional + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + // +kubebuilder:validation:XValidation:rule="self.matches('^[A-Za-z0-9-]+$')",message="name must consist of alphanumeric characters or '-'" + Name string `json:"name,omitempty"` + + ClusterAPIInstallerComponentSource `json:",inline"` +} + +// ClusterAPIInstallerComponentSource defines the source of a component which will be installed by this revision. // +union // +kubebuilder:validation:XValidation:rule="self.type == 'Image' ? has(self.image) : !has(self.image)",message="image is required when type is Image, and forbidden otherwise" -type ClusterAPIInstallerComponent struct { +type ClusterAPIInstallerComponentSource struct { // type is the source type of the component. 
// The only valid value is Image. // When set to Image, the image field must be set and will define an image source for the component. diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go index 1f3fd281e1..3c3dc8e7a5 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go @@ -61,7 +61,7 @@ func (in *ClusterAPI) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterAPIInstallerComponent) DeepCopyInto(out *ClusterAPIInstallerComponent) { *out = *in - out.Image = in.Image + out.ClusterAPIInstallerComponentSource = in.ClusterAPIInstallerComponentSource return } @@ -91,6 +91,23 @@ func (in *ClusterAPIInstallerComponentImage) DeepCopy() *ClusterAPIInstallerComp return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAPIInstallerComponentSource) DeepCopyInto(out *ClusterAPIInstallerComponentSource) { + *out = *in + out.Image = in.Image + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAPIInstallerComponentSource. +func (in *ClusterAPIInstallerComponentSource) DeepCopy() *ClusterAPIInstallerComponentSource { + if in == nil { + return nil + } + out := new(ClusterAPIInstallerComponentSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterAPIInstallerRevision) DeepCopyInto(out *ClusterAPIInstallerRevision) { *out = *in @@ -99,6 +116,13 @@ func (in *ClusterAPIInstallerRevision) DeepCopyInto(out *ClusterAPIInstallerRevi *out = make([]string, len(*in)) copy(*out, *in) } + if in.ManifestSubstitutions != nil { + in, out := &in.ManifestSubstitutions, &out.ManifestSubstitutions + *out = make([]ClusterAPIInstallerRevisionManifestSubstitution, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.Components != nil { in, out := &in.Components, &out.Components *out = make([]ClusterAPIInstallerComponent, len(*in)) @@ -117,6 +141,27 @@ func (in *ClusterAPIInstallerRevision) DeepCopy() *ClusterAPIInstallerRevision { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAPIInstallerRevisionManifestSubstitution) DeepCopyInto(out *ClusterAPIInstallerRevisionManifestSubstitution) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAPIInstallerRevisionManifestSubstitution. +func (in *ClusterAPIInstallerRevisionManifestSubstitution) DeepCopy() *ClusterAPIInstallerRevisionManifestSubstitution { + if in == nil { + return nil + } + out := new(ClusterAPIInstallerRevisionManifestSubstitution) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterAPIList) DeepCopyInto(out *ClusterAPIList) { *out = *in diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go index 92cef1421a..bf4117768d 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go @@ -147,9 +147,8 @@ func (ClusterAPI) SwaggerDoc() map[string]string { } var map_ClusterAPIInstallerComponent = map[string]string{ - "": "ClusterAPIInstallerComponent defines a component which will be installed by this revision.", - "type": "type is the source type of the component. The only valid value is Image. When set to Image, the image field must be set and will define an image source for the component.", - "image": "image defines an image source for a component. The image must contain a /capi-operator-installer directory containing the component manifests.", + "": "ClusterAPIInstallerComponent defines a component which will be installed by this revision.", + "name": "name is the human-readable name of the component. The value has no effect, and will not be set if the component does not define a name in its manifests. If set it must consist of alphanumeric characters, or '-', and may not exceed 255 characters.", } func (ClusterAPIInstallerComponent) SwaggerDoc() map[string]string { @@ -166,11 +165,22 @@ func (ClusterAPIInstallerComponentImage) SwaggerDoc() map[string]string { return map_ClusterAPIInstallerComponentImage } +var map_ClusterAPIInstallerComponentSource = map[string]string{ + "": "ClusterAPIInstallerComponentSource defines the source of a component which will be installed by this revision.", + "type": "type is the source type of the component. The only valid value is Image. 
When set to Image, the image field must be set and will define an image source for the component.", + "image": "image defines an image source for a component. The image must contain a /capi-operator-installer directory containing the component manifests.", +} + +func (ClusterAPIInstallerComponentSource) SwaggerDoc() map[string]string { + return map_ClusterAPIInstallerComponentSource +} + var map_ClusterAPIInstallerRevision = map[string]string{ "name": "name is the name of a revision.", "revision": "revision is a monotonically increasing number that is assigned to a revision.", "contentID": "contentID uniquely identifies the content of this revision. The contentID must be between 1 and 255 characters long.", "unmanagedCustomResourceDefinitions": "unmanagedCustomResourceDefinitions is a list of the names of ClusterResourceDefinition (CRD) objects which are included in this revision, but which should not be installed or updated. If not set, all CRDs in the revision will be managed by the CAPI operator.", + "manifestSubstitutions": "manifestSubstitutions is a list of envsubst style substitutions which will be applied to manifests in the revision during rendering. If defined it must not be empty, and may not contain more than 32 items. Each manifest substitution must have a unique key.", "components": "components is a list of components which will be installed by this revision. Components will be installed in the order they are listed. If omitted no components will be installed.\n\nThe maximum number of components is 32.", } @@ -178,6 +188,16 @@ func (ClusterAPIInstallerRevision) SwaggerDoc() map[string]string { return map_ClusterAPIInstallerRevision } +var map_ClusterAPIInstallerRevisionManifestSubstitution = map[string]string{ + "": "ClusterAPIInstallerRevisionManifestSubstitution defines an envsubst style substitution which will be applied to manifests in a revision during rendering.", + "key": "key is the name of the envsubst variable to substitute. 
It must be a valid envsubst variable name, consisting of letters, digits, and underscores, and must start with a letter or underscore. The key must not be empty, and must not exceed 255 characters.", + "value": "value is the value to substitute for the envsubst variable. It may be empty, in which case the variable will be substituted with an empty string. The value must not exceed 4096 characters.", +} + +func (ClusterAPIInstallerRevisionManifestSubstitution) SwaggerDoc() map[string]string { + return map_ClusterAPIInstallerRevisionManifestSubstitution +} + var map_ClusterAPIList = map[string]string{ "": "ClusterAPIList contains a list of ClusterAPI configurations\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -198,10 +218,11 @@ func (ClusterAPISpec) SwaggerDoc() map[string]string { } var map_ClusterAPIStatus = map[string]string{ - "": "ClusterAPIStatus describes the current state of the capi-operator.", - "currentRevision": "currentRevision is the name of the most recently fully applied revision. It is written by the installer controller. If it is absent, it indicates that no revision has been fully applied yet. If set, currentRevision must correspond to an entry in the revisions list.", - "desiredRevision": "desiredRevision is the name of the desired revision. It is written by the revision controller. It must be set to the name of the entry in the revisions list with the highest revision number.", - "revisions": "revisions is a list of all currently active revisions. A revision is active until the installer controller updates currentRevision to a later revision. It is written by the revision controller.\n\nThe maximum number of revisions is 16. 
All revisions must have a unique name. All revisions must have a unique revision number. When adding a revision, the revision number must be greater than the highest revision number in the list. Revisions are immutable, although they can be deleted.", + "": "ClusterAPIStatus describes the current state of the capi-operator.", + "currentRevision": "currentRevision is the name of the most recently fully applied revision. It is written by the installer controller. If it is absent, it indicates that no revision has been fully applied yet. If set, currentRevision must correspond to an entry in the revisions list.", + "desiredRevision": "desiredRevision is the name of the desired revision. It is written by the revision controller. It must be set to the name of the entry in the revisions list with the highest revision number.", + "revisions": "revisions is a list of all currently active revisions. A revision is active until the installer controller updates currentRevision to a later revision. It is written by the revision controller.\n\nThe maximum number of revisions is 16. All revisions must have a unique name. All revisions must have a unique revision number. When adding a revision, the revision number must be greater than the highest revision number in the list. Revisions are immutable, although they can be deleted.", + "observedRevisionGeneration": "observedRevisionGeneration is the generation of the ClusterAPI object that was last observed by the revision controller. If specified it must be greater than or equal to 1, and less than 2^53. 
It may not decrease or be unset once set.", } func (ClusterAPIStatus) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/quota/v1/generated.proto b/vendor/github.com/openshift/api/quota/v1/generated.proto index fb7fed242a..998c594732 100644 --- a/vendor/github.com/openshift/api/quota/v1/generated.proto +++ b/vendor/github.com/openshift/api/quota/v1/generated.proto @@ -51,7 +51,7 @@ message AppliedClusterResourceQuotaList { // +kubebuilder:resource:path=clusterresourcequotas,scope=Cluster // +kubebuilder:subresource:status // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=apiserver,operatorOrdering=01 // +openshift:compatibility-gen:level=1 // +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true message ClusterResourceQuota { diff --git a/vendor/github.com/openshift/api/quota/v1/types.go b/vendor/github.com/openshift/api/quota/v1/types.go index 0cfb85f87e..9f60962146 100644 --- a/vendor/github.com/openshift/api/quota/v1/types.go +++ b/vendor/github.com/openshift/api/quota/v1/types.go @@ -17,7 +17,7 @@ import ( // +kubebuilder:resource:path=clusterresourcequotas,scope=Cluster // +kubebuilder:subresource:status // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/470 -// +openshift:file-pattern=cvoRunLevel=0000_03,operatorName=config-operator,operatorOrdering=01 +// +openshift:file-pattern=cvoRunLevel=0000_00,operatorName=apiserver,operatorOrdering=01 // +openshift:compatibility-gen:level=1 // +kubebuilder:metadata:annotations=release.openshift.io/bootstrap-required=true type ClusterResourceQuota struct { diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/quota/v1/zz_generated.featuregated-crd-manifests.yaml index 
b0fae46f7d..1a56a512d7 100644 --- a/vendor/github.com/openshift/api/quota/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.featuregated-crd-manifests.yaml @@ -6,9 +6,9 @@ clusterresourcequotas.quota.openshift.io: Capability: "" Category: "" FeatureGates: [] - FilenameOperatorName: config-operator + FilenameOperatorName: apiserver FilenameOperatorOrdering: "01" - FilenameRunLevel: "0000_03" + FilenameRunLevel: "0000_00" GroupName: quota.openshift.io HasStatus: true KindName: ClusterResourceQuota diff --git a/vendor/github.com/openshift/api/security/v1/generated.proto b/vendor/github.com/openshift/api/security/v1/generated.proto index 933de5450e..bb8a37fc0f 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.proto +++ b/vendor/github.com/openshift/api/security/v1/generated.proto @@ -246,7 +246,7 @@ message SecurityContextConstraints { optional bool allowPrivilegedContainer = 3; // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both + // unless the pod spec specifically drops the capability. You may not list a capability in both // DefaultAddCapabilities and RequiredDropCapabilities. // +nullable // +listType=atomic @@ -302,7 +302,6 @@ message SecurityContextConstraints { // When "AllowHostLevel" is set, a pod author may set `hostUsers` to either `true` or `false`. // When "RequirePodLevel" is set, a pod author must set `hostUsers` to `false`. // When omitted, the default value is "AllowHostLevel". 
- // +openshift:enable:FeatureGate=UserNamespacesPodSecurityStandards // +kubebuilder:validation:Enum="AllowHostLevel";"RequirePodLevel" // +kubebuilder:default:="AllowHostLevel" // +default="AllowHostLevel" @@ -358,7 +357,7 @@ message SecurityContextConstraints { // seccompProfiles lists the allowed profiles that may be set for the pod or // container's seccomp annotations. An unset (nil) or empty value means that no profiles may - // be specifid by the pod or container. The wildcard '*' may be used to allow all profiles. When + // be specified by the pod or container. The wildcard '*' may be used to allow all profiles. When // used to generate a value for a pod the first non-wildcard profile will be used as // the default. // +nullable diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go index fb491480d7..a875902138 100644 --- a/vendor/github.com/openshift/api/security/v1/types.go +++ b/vendor/github.com/openshift/api/security/v1/types.go @@ -57,7 +57,7 @@ type SecurityContextConstraints struct { // allowPrivilegedContainer determines if a container can request to be run as privileged. AllowPrivilegedContainer bool `json:"allowPrivilegedContainer" protobuf:"varint,3,opt,name=allowPrivilegedContainer"` // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both + // unless the pod spec specifically drops the capability. You may not list a capability in both // DefaultAddCapabilities and RequiredDropCapabilities. // +nullable // +listType=atomic @@ -103,7 +103,6 @@ type SecurityContextConstraints struct { // When "AllowHostLevel" is set, a pod author may set `hostUsers` to either `true` or `false`. // When "RequirePodLevel" is set, a pod author must set `hostUsers` to `false`. // When omitted, the default value is "AllowHostLevel". 
- // +openshift:enable:FeatureGate=UserNamespacesPodSecurityStandards // +kubebuilder:validation:Enum="AllowHostLevel";"RequirePodLevel" // +kubebuilder:default:="AllowHostLevel" // +default="AllowHostLevel" @@ -151,7 +150,7 @@ type SecurityContextConstraints struct { // seccompProfiles lists the allowed profiles that may be set for the pod or // container's seccomp annotations. An unset (nil) or empty value means that no profiles may - // be specifid by the pod or container. The wildcard '*' may be used to allow all profiles. When + // be specified by the pod or container. The wildcard '*' may be used to allow all profiles. When // used to generate a value for a pod the first non-wildcard profile will be used as // the default. // +nullable diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml index 178c970780..86f78058a2 100644 --- a/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.featuregated-crd-manifests.yaml @@ -5,8 +5,7 @@ securitycontextconstraints.security.openshift.io: CRDName: securitycontextconstraints.security.openshift.io Capability: "" Category: "" - FeatureGates: - - UserNamespacesPodSecurityStandards + FeatureGates: [] FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_03" diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go index 29cddf7e64..67882a66e9 100644 --- a/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go @@ -171,7 +171,7 @@ var map_SecurityContextConstraints = map[string]string{ "metadata": "metadata is the standard 
object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "priority": "priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name.", "allowPrivilegedContainer": "allowPrivilegedContainer determines if a container can request to be run as privileged.", - "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", + "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities.", "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field maybe added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. 
To allow all capabilities you may use '*'.", "allowHostDirVolumePlugin": "allowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin", @@ -191,7 +191,7 @@ var map_SecurityContextConstraints = map[string]string{ "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", "users": "The users who have permissions to use this security context constraints", "groups": "The groups that have permission to use this security context constraints", - "seccompProfiles": "seccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specifid by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default.", + "seccompProfiles": "seccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specified by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. 
\"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go index ec57615082..457cb43aca 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go @@ -10,6 +10,11 @@ type AWSDNSSpecApplyConfiguration struct { // privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing // operations on the cluster's private hosted zone specified in the cluster DNS config. // When left empty, no role should be assumed. + // + // The ARN must follow the format: arn::iam:::role/, where: + // is the AWS partition (aws, aws-cn, aws-us-gov, or aws-eusc), + // is a 12-digit numeric identifier for the AWS account, + // is the IAM role name. PrivateZoneIAMRole *string `json:"privateZoneIAMRole,omitempty"` } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awskmsconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awskmsconfig.go deleted file mode 100644 index 483e570da2..0000000000 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awskmsconfig.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1 - -// AWSKMSConfigApplyConfiguration represents a declarative configuration of the AWSKMSConfig type for use -// with apply. -// -// AWSKMSConfig defines the KMS config specific to AWS KMS provider -type AWSKMSConfigApplyConfiguration struct { - // keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption. - // The value must adhere to the format `arn:aws:kms:::key/`, where: - // - `` is the AWS region consisting of lowercase letters and hyphens followed by a number. - // - `` is a 12-digit numeric identifier for the AWS account. - // - `` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens. - KeyARN *string `json:"keyARN,omitempty"` - // region specifies the AWS region where the KMS instance exists, and follows the format - // `--`, e.g.: `us-east-1`. - // Only lowercase letters and hyphens followed by numbers are allowed. - Region *string `json:"region,omitempty"` -} - -// AWSKMSConfigApplyConfiguration constructs a declarative configuration of the AWSKMSConfig type for use with -// apply. -func AWSKMSConfig() *AWSKMSConfigApplyConfiguration { - return &AWSKMSConfigApplyConfiguration{} -} - -// WithKeyARN sets the KeyARN field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the KeyARN field is set to the value of the last call. -func (b *AWSKMSConfigApplyConfiguration) WithKeyARN(value string) *AWSKMSConfigApplyConfiguration { - b.KeyARN = &value - return b -} - -// WithRegion sets the Region field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Region field is set to the value of the last call. 
-func (b *AWSKMSConfigApplyConfiguration) WithRegion(value string) *AWSKMSConfigApplyConfiguration { - b.Region = &value - return b -} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go index f5c63c6ae9..c01827c113 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go @@ -42,6 +42,8 @@ type InfrastructureStatusApplyConfiguration struct { // and the operators should not configure the operand for highly-available operation // The 'External' mode indicates that the control plane is hosted externally to the cluster and that // its components are not visible within the cluster. + // The 'HighlyAvailableArbiter' mode indicates that the control plane will consist of 2 control-plane nodes + // that run conventional services and 1 smaller sized arbiter node that runs a bare minimum of services to maintain quorum. ControlPlaneTopology *configv1.TopologyMode `json:"controlPlaneTopology,omitempty"` // infrastructureTopology expresses the expectations for infrastructure services that do not run on control // plane nodes, usually indicated by a node selector for a `role` value diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kmsconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kmsconfig.go index 3590aae241..8eac52ddda 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kmsconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/kmsconfig.go @@ -10,15 +10,17 @@ import ( // with apply. 
// // KMSConfig defines the configuration for the KMS instance -// that will be used with KMSEncryptionProvider encryption +// that will be used with KMS encryption type KMSConfigApplyConfiguration struct { // type defines the kind of platform for the KMS provider. - // Available provider types are AWS only. + // Allowed values are Vault. + // When set to Vault, the plugin connects to a HashiCorp Vault server for key management. Type *configv1.KMSProviderType `json:"type,omitempty"` - // aws defines the key config for using an AWS KMS instance - // for the encryption. The AWS KMS instance is managed + // vault defines the configuration for the Vault KMS plugin. + // The plugin connects to a Vault Enterprise server that is managed // by the user outside the purview of the control plane. - AWS *AWSKMSConfigApplyConfiguration `json:"aws,omitempty"` + // This field must be set when type is Vault, and must be unset otherwise. + Vault *VaultKMSConfigApplyConfiguration `json:"vault,omitempty"` } // KMSConfigApplyConfiguration constructs a declarative configuration of the KMSConfig type for use with @@ -35,10 +37,10 @@ func (b *KMSConfigApplyConfiguration) WithType(value configv1.KMSProviderType) * return b } -// WithAWS sets the AWS field in the declarative configuration to the given value +// WithVault sets the Vault field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the AWS field is set to the value of the last call. -func (b *KMSConfigApplyConfiguration) WithAWS(value *AWSKMSConfigApplyConfiguration) *KMSConfigApplyConfiguration { - b.AWS = value +// If called multiple times, the Vault field is set to the value of the last call. 
+func (b *KMSConfigApplyConfiguration) WithVault(value *VaultKMSConfigApplyConfiguration) *KMSConfigApplyConfiguration { + b.Vault = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go index d9b0e87914..08ebf26a86 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/prefixedclaimmapping.go @@ -11,7 +11,8 @@ type PrefixedClaimMappingApplyConfiguration struct { TokenClaimMappingApplyConfiguration `json:",inline"` // prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes. // - // When omitted (""), no prefix is applied to the cluster identity attribute. + // When omitted or set to an empty string (""), no prefix is applied to the cluster identity attribute. + // Must not be set to a non-empty value when expression is set. // // Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains an array of strings "a", "b" and "c", the mapping will result in an array of string "myoidc:a", "myoidc:b" and "myoidc:c". 
Prefix *string `json:"prefix,omitempty"` diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go index d33d6d71d8..8676ae891f 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/usernameclaimmapping.go @@ -29,11 +29,9 @@ type UsernameClaimMappingApplyConfiguration struct { // Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). // // When set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. - // // The prefix field must be set when prefixPolicy is 'Prefix'. - // + // Must not be set to 'Prefix' when expression is set. // When set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim. - // // When omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. // Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. // diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultapproleauthentication.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultapproleauthentication.go new file mode 100644 index 0000000000..ab924194cb --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultapproleauthentication.go @@ -0,0 +1,30 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VaultAppRoleAuthenticationApplyConfiguration represents a declarative configuration of the VaultAppRoleAuthentication type for use +// with apply. 
+// +// VaultAppRoleAuthentication defines the configuration for AppRole authentication with Vault. +type VaultAppRoleAuthenticationApplyConfiguration struct { + // secret references a secret in the openshift-config namespace containing + // the AppRole credentials used to authenticate with Vault. + // The secret must contain two keys: "roleID" for the AppRole Role ID and "secretID" for the AppRole Secret ID. + // + // The namespace for the secret is openshift-config. + Secret *VaultSecretReferenceApplyConfiguration `json:"secret,omitempty"` +} + +// VaultAppRoleAuthenticationApplyConfiguration constructs a declarative configuration of the VaultAppRoleAuthentication type for use with +// apply. +func VaultAppRoleAuthentication() *VaultAppRoleAuthenticationApplyConfiguration { + return &VaultAppRoleAuthenticationApplyConfiguration{} +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *VaultAppRoleAuthenticationApplyConfiguration) WithSecret(value *VaultSecretReferenceApplyConfiguration) *VaultAppRoleAuthenticationApplyConfiguration { + b.Secret = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultauthentication.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultauthentication.go new file mode 100644 index 0000000000..466bbc7970 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultauthentication.go @@ -0,0 +1,43 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// VaultAuthenticationApplyConfiguration represents a declarative configuration of the VaultAuthentication type for use +// with apply. 
+// +// VaultAuthentication defines the authentication method used to authenticate with Vault. +type VaultAuthenticationApplyConfiguration struct { + // type defines the authentication method used to authenticate with Vault. + // Allowed values are AppRole. + // When set to AppRole, the plugin uses AppRole credentials to authenticate with Vault. + Type *configv1.VaultAuthenticationType `json:"type,omitempty"` + // appRole defines the configuration for AppRole authentication. + // This field must be set when type is AppRole, and must be unset otherwise. + AppRole *VaultAppRoleAuthenticationApplyConfiguration `json:"appRole,omitempty"` +} + +// VaultAuthenticationApplyConfiguration constructs a declarative configuration of the VaultAuthentication type for use with +// apply. +func VaultAuthentication() *VaultAuthenticationApplyConfiguration { + return &VaultAuthenticationApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *VaultAuthenticationApplyConfiguration) WithType(value configv1.VaultAuthenticationType) *VaultAuthenticationApplyConfiguration { + b.Type = &value + return b +} + +// WithAppRole sets the AppRole field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AppRole field is set to the value of the last call. 
+func (b *VaultAuthenticationApplyConfiguration) WithAppRole(value *VaultAppRoleAuthenticationApplyConfiguration) *VaultAuthenticationApplyConfiguration { + b.AppRole = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultconfigmapreference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultconfigmapreference.go new file mode 100644 index 0000000000..cb0e46af85 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultconfigmapreference.go @@ -0,0 +1,28 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VaultConfigMapReferenceApplyConfiguration represents a declarative configuration of the VaultConfigMapReference type for use +// with apply. +// +// VaultConfigMapReference references a ConfigMap in the openshift-config namespace. +type VaultConfigMapReferenceApplyConfiguration struct { + // name is the metadata.name of the referenced ConfigMap in the openshift-config namespace. + // The name must be a valid DNS subdomain name: it must contain no more than 253 characters, + // contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. + Name *string `json:"name,omitempty"` +} + +// VaultConfigMapReferenceApplyConfiguration constructs a declarative configuration of the VaultConfigMapReference type for use with +// apply. +func VaultConfigMapReference() *VaultConfigMapReferenceApplyConfiguration { + return &VaultConfigMapReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *VaultConfigMapReferenceApplyConfiguration) WithName(value string) *VaultConfigMapReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultkmsconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultkmsconfig.go new file mode 100644 index 0000000000..7602f33e3b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultkmsconfig.go @@ -0,0 +1,125 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VaultKMSConfigApplyConfiguration represents a declarative configuration of the VaultKMSConfig type for use +// with apply. +// +// VaultKMSConfig defines the KMS plugin configuration specific to Vault KMS +type VaultKMSConfigApplyConfiguration struct { + // kmsPluginImage specifies the container image for the HashiCorp Vault KMS plugin. + // + // The image must be a fully qualified OCI image pull spec with a SHA256 digest. + // The format is: host[:port][/namespace]/name@sha256: + // where the digest must be 64 characters long and consist only of lowercase hexadecimal characters, a-f and 0-9. + // The total length must be between 75 and 447 characters. + // + // Short names (e.g., "vault-plugin" or "hashicorp/vault-plugin") are not allowed. + // The registry hostname must be included and must contain at least one dot. + // Image tags (e.g., ":latest", ":v1.0.0") are not allowed. + // + // Consult the OpenShift documentation for compatible plugin versions with your cluster version, + // then obtain the image digest for that version from HashiCorp's container registry. + // + // For disconnected environments, mirror the plugin image to an accessible registry + // and reference the mirrored location with its digest. + KMSPluginImage *string `json:"kmsPluginImage,omitempty"` + // vaultAddress specifies the address of the HashiCorp Vault instance. 
+ // The value must be a valid HTTPS URL containing only scheme, host, and optional port. + // Paths, user info, query parameters, and fragments are not allowed. + // + // Format: https://hostname[:port] + // Example: https://vault.example.com:8200 + // + // The value must be between 1 and 512 characters. + VaultAddress *string `json:"vaultAddress,omitempty"` + // vaultNamespace specifies the Vault namespace where the Transit secrets engine is mounted. + // This is only applicable for Vault Enterprise installations. + // When this field is not set, no namespace is used. + // + // The value must be between 1 and 4096 characters. + // The namespace cannot end with a forward slash, cannot contain spaces, and cannot be one of the reserved strings: root, sys, audit, auth, cubbyhole, or identity. + VaultNamespace *string `json:"vaultNamespace,omitempty"` + // tls contains the TLS configuration for connecting to the Vault server. + // When this field is not set, system default TLS settings are used. + TLS *VaultTLSConfigApplyConfiguration `json:"tls,omitempty"` + // authentication defines the authentication method used to authenticate with Vault. + Authentication *VaultAuthenticationApplyConfiguration `json:"authentication,omitempty"` + // transitMount specifies the mount path of the Vault Transit engine. + // The value must be between 1 and 1024 characters when specified. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose a reasonable default. These defaults are subject to change over time. + // The current default is "transit". + // + // The mount path cannot start or end with a forward slash, cannot contain spaces, + // and cannot contain consecutive forward slashes. + TransitMount *string `json:"transitMount,omitempty"` + // transitKey specifies the name of the encryption key in Vault's Transit engine. + // This key is used to encrypt and decrypt data. 
+ // + // The key name must be between 1 and 512 characters and cannot contain spaces or forward slashes. + TransitKey *string `json:"transitKey,omitempty"` +} + +// VaultKMSConfigApplyConfiguration constructs a declarative configuration of the VaultKMSConfig type for use with +// apply. +func VaultKMSConfig() *VaultKMSConfigApplyConfiguration { + return &VaultKMSConfigApplyConfiguration{} +} + +// WithKMSPluginImage sets the KMSPluginImage field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KMSPluginImage field is set to the value of the last call. +func (b *VaultKMSConfigApplyConfiguration) WithKMSPluginImage(value string) *VaultKMSConfigApplyConfiguration { + b.KMSPluginImage = &value + return b +} + +// WithVaultAddress sets the VaultAddress field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VaultAddress field is set to the value of the last call. +func (b *VaultKMSConfigApplyConfiguration) WithVaultAddress(value string) *VaultKMSConfigApplyConfiguration { + b.VaultAddress = &value + return b +} + +// WithVaultNamespace sets the VaultNamespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VaultNamespace field is set to the value of the last call. +func (b *VaultKMSConfigApplyConfiguration) WithVaultNamespace(value string) *VaultKMSConfigApplyConfiguration { + b.VaultNamespace = &value + return b +} + +// WithTLS sets the TLS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the TLS field is set to the value of the last call. +func (b *VaultKMSConfigApplyConfiguration) WithTLS(value *VaultTLSConfigApplyConfiguration) *VaultKMSConfigApplyConfiguration { + b.TLS = value + return b +} + +// WithAuthentication sets the Authentication field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Authentication field is set to the value of the last call. +func (b *VaultKMSConfigApplyConfiguration) WithAuthentication(value *VaultAuthenticationApplyConfiguration) *VaultKMSConfigApplyConfiguration { + b.Authentication = value + return b +} + +// WithTransitMount sets the TransitMount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TransitMount field is set to the value of the last call. +func (b *VaultKMSConfigApplyConfiguration) WithTransitMount(value string) *VaultKMSConfigApplyConfiguration { + b.TransitMount = &value + return b +} + +// WithTransitKey sets the TransitKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TransitKey field is set to the value of the last call. 
+func (b *VaultKMSConfigApplyConfiguration) WithTransitKey(value string) *VaultKMSConfigApplyConfiguration { + b.TransitKey = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultsecretreference.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultsecretreference.go new file mode 100644 index 0000000000..5918611ed7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaultsecretreference.go @@ -0,0 +1,28 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VaultSecretReferenceApplyConfiguration represents a declarative configuration of the VaultSecretReference type for use +// with apply. +// +// VaultSecretReference references a secret in the openshift-config namespace. +type VaultSecretReferenceApplyConfiguration struct { + // name is the metadata.name of the referenced secret in the openshift-config namespace. + // The name must be a valid DNS subdomain name: it must contain no more than 253 characters, + // contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. + Name *string `json:"name,omitempty"` +} + +// VaultSecretReferenceApplyConfiguration constructs a declarative configuration of the VaultSecretReference type for use with +// apply. +func VaultSecretReference() *VaultSecretReferenceApplyConfiguration { + return &VaultSecretReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *VaultSecretReferenceApplyConfiguration) WithName(value string) *VaultSecretReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaulttlsconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaulttlsconfig.go new file mode 100644 index 0000000000..9fba4e1a42 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vaulttlsconfig.go @@ -0,0 +1,58 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VaultTLSConfigApplyConfiguration represents a declarative configuration of the VaultTLSConfig type for use +// with apply. +// +// VaultTLSConfig contains TLS configuration for connecting to Vault. +type VaultTLSConfigApplyConfiguration struct { + // caBundle references a ConfigMap in the openshift-config namespace containing + // the CA certificate bundle used to verify the TLS connection to the Vault server. + // The ConfigMap must contain the CA bundle in the key "ca-bundle.crt". + // When this field is not set, the system's trusted CA certificates are used. + // + // The namespace for the ConfigMap is openshift-config. + // + // Example ConfigMap: + // apiVersion: v1 + // kind: ConfigMap + // metadata: + // name: vault-ca-bundle + // namespace: openshift-config + // data: + // ca-bundle.crt: | + // -----BEGIN CERTIFICATE----- + // ... + // -----END CERTIFICATE----- + CABundle *VaultConfigMapReferenceApplyConfiguration `json:"caBundle,omitempty"` + // serverName specifies the Server Name Indication (SNI) to use when connecting to Vault via TLS. + // This is useful when the Vault server's hostname doesn't match its TLS certificate. + // When this field is not set, the hostname from vaultAddress is used for SNI. 
+ // + // The value must be a valid DNS hostname: it must contain no more than 253 characters, + // contain only lowercase alphanumeric characters, '-' or '.', and start and end with an alphanumeric character. + ServerName *string `json:"serverName,omitempty"` +} + +// VaultTLSConfigApplyConfiguration constructs a declarative configuration of the VaultTLSConfig type for use with +// apply. +func VaultTLSConfig() *VaultTLSConfigApplyConfiguration { + return &VaultTLSConfigApplyConfiguration{} +} + +// WithCABundle sets the CABundle field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CABundle field is set to the value of the last call. +func (b *VaultTLSConfigApplyConfiguration) WithCABundle(value *VaultConfigMapReferenceApplyConfiguration) *VaultTLSConfigApplyConfiguration { + b.CABundle = value + return b +} + +// WithServerName sets the ServerName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServerName field is set to the value of the last call. 
+func (b *VaultTLSConfigApplyConfiguration) WithServerName(value string) *VaultTLSConfigApplyConfiguration { + b.ServerName = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/alertmanagercustomconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/alertmanagercustomconfig.go index 4e90578a18..c47130a180 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/alertmanagercustomconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/alertmanagercustomconfig.go @@ -47,7 +47,7 @@ type AlertmanagerCustomConfigApplyConfiguration struct { // - name: memory // request: 40Mi // limit: null - // Maximum length for this list is 10. + // Maximum length for this list is 5. // Minimum length for this list is 1. // Each resource name must be unique within this list. Resources []ContainerResourceApplyConfiguration `json:"resources,omitempty"` diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoringspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoringspec.go index 31943fe050..35ec6d14e6 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoringspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/clustermonitoringspec.go @@ -50,6 +50,34 @@ type ClusterMonitoringSpecApplyConfiguration struct { // about the state of OpenShift-specific Kubernetes objects, such as routes, builds, and deployments. // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. 
OpenShiftStateMetricsConfig *OpenShiftStateMetricsConfigApplyConfiguration `json:"openShiftStateMetricsConfig,omitempty"` + // telemeterClientConfig is an optional field that can be used to configure the Telemeter Client + // component that runs in the openshift-monitoring namespace. The Telemeter Client collects + // selected monitoring metrics and forwards them to Red Hat for telemetry purposes. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // When set, at least one field must be specified within telemeterClientConfig. + TelemeterClientConfig *TelemeterClientConfigApplyConfiguration `json:"telemeterClientConfig,omitempty"` + // thanosQuerierConfig is an optional field that can be used to configure the Thanos Querier + // component that runs in the openshift-monitoring namespace. The Thanos Querier provides + // a global query view by aggregating and deduplicating metrics from multiple Prometheus instances. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default deploys the Thanos Querier on linux nodes with 5m CPU and 12Mi memory + // requests, and no custom tolerations or topology spread constraints. + // When set, at least one field must be specified within thanosQuerierConfig. + ThanosQuerierConfig *ThanosQuerierConfigApplyConfiguration `json:"thanosQuerierConfig,omitempty"` + // nodeExporterConfig is an optional field that can be used to configure the node-exporter agent + // that runs as a DaemonSet in the openshift-monitoring namespace. The node-exporter agent collects + // hardware and OS-level metrics from every node in the cluster. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. 
+ NodeExporterConfig *NodeExporterConfigApplyConfiguration `json:"nodeExporterConfig,omitempty"` + // monitoringPluginConfig is an optional field that can be used to configure the monitoring plugin + // that runs as a dynamic plugin of the OpenShift web console. The monitoring plugin provides + // the monitoring UI in the OpenShift web console for visualizing metrics, alerts, and dashboards. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default deploys the monitoring-plugin as a single-replica Deployment + // on linux nodes with 10m CPU and 50Mi memory requests, and no custom tolerations + // or topology spread constraints. + // When set, at least one field must be specified within monitoringPluginConfig. + MonitoringPluginConfig *MonitoringPluginConfigApplyConfiguration `json:"monitoringPluginConfig,omitempty"` } // ClusterMonitoringSpecApplyConfiguration constructs a declarative configuration of the ClusterMonitoringSpec type for use with @@ -113,3 +141,35 @@ func (b *ClusterMonitoringSpecApplyConfiguration) WithOpenShiftStateMetricsConfi b.OpenShiftStateMetricsConfig = value return b } + +// WithTelemeterClientConfig sets the TelemeterClientConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TelemeterClientConfig field is set to the value of the last call. +func (b *ClusterMonitoringSpecApplyConfiguration) WithTelemeterClientConfig(value *TelemeterClientConfigApplyConfiguration) *ClusterMonitoringSpecApplyConfiguration { + b.TelemeterClientConfig = value + return b +} + +// WithThanosQuerierConfig sets the ThanosQuerierConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ThanosQuerierConfig field is set to the value of the last call. +func (b *ClusterMonitoringSpecApplyConfiguration) WithThanosQuerierConfig(value *ThanosQuerierConfigApplyConfiguration) *ClusterMonitoringSpecApplyConfiguration { + b.ThanosQuerierConfig = value + return b +} + +// WithNodeExporterConfig sets the NodeExporterConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeExporterConfig field is set to the value of the last call. +func (b *ClusterMonitoringSpecApplyConfiguration) WithNodeExporterConfig(value *NodeExporterConfigApplyConfiguration) *ClusterMonitoringSpecApplyConfiguration { + b.NodeExporterConfig = value + return b +} + +// WithMonitoringPluginConfig sets the MonitoringPluginConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MonitoringPluginConfig field is set to the value of the last call. +func (b *ClusterMonitoringSpecApplyConfiguration) WithMonitoringPluginConfig(value *MonitoringPluginConfigApplyConfiguration) *ClusterMonitoringSpecApplyConfiguration { + b.MonitoringPluginConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/containerresource.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/containerresource.go index d600828b01..2240e1b154 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/containerresource.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/containerresource.go @@ -9,6 +9,12 @@ import ( // ContainerResourceApplyConfiguration represents a declarative configuration of the ContainerResource type for use // with apply. 
// +// MaxItems on []ContainerResource fields is kept at 5 to stay within the +// Kubernetes CRD CEL validation cost budget (StaticEstimatedCRDCostLimit). +// The quantity() CEL function has a high fixed estimated cost per invocation, +// and the limit-vs-request comparison rule is costed per maxItems per location. +// With multiple structs in ClusterMonitoringSpec embedding []ContainerResource, +// maxItems > 5 causes the total estimated rule cost to exceed the budget. // ContainerResource defines a single resource requirement for a container. type ContainerResourceApplyConfiguration struct { // name of the resource (e.g. "cpu", "memory", "hugepages-2Mi"). @@ -18,6 +24,7 @@ type ContainerResourceApplyConfiguration struct { // request is the minimum amount of the resource required (e.g. "2Mi", "1Gi"). // This field is optional. // When limit is specified, request cannot be greater than limit. + // The value must be greater than 0 when specified. Request *resource.Quantity `json:"request,omitempty"` // limit is the maximum amount of the resource allowed (e.g. "2Mi", "1Gi"). // This field is optional. diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/metricsserverconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/metricsserverconfig.go index ea4d945563..bc77df9d2f 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/metricsserverconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/metricsserverconfig.go @@ -58,7 +58,7 @@ type MetricsServerConfigApplyConfiguration struct { // - name: memory // request: 40Mi // limit: null - // Maximum length for this list is 10. + // Maximum length for this list is 5. // Minimum length for this list is 1. // Each resource name must be unique within this list. 
Resources []ContainerResourceApplyConfiguration `json:"resources,omitempty"` diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/monitoringpluginconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/monitoringpluginconfig.go new file mode 100644 index 0000000000..6f10b30e5d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/monitoringpluginconfig.go @@ -0,0 +1,112 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// MonitoringPluginConfigApplyConfiguration represents a declarative configuration of the MonitoringPluginConfig type for use +// with apply. +// +// MonitoringPluginConfig provides configuration options for the monitoring plugin +// that runs as a dynamic plugin of the OpenShift web console. +// The monitoring plugin provides the monitoring UI in the OpenShift web console +// for visualizing metrics, alerts, and dashboards. +// At least one field must be specified; an empty monitoringPluginConfig object is not allowed. +type MonitoringPluginConfigApplyConfiguration struct { + // nodeSelector defines the nodes on which the Pods are scheduled. + // nodeSelector is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default value is `kubernetes.io/os: linux`. + // When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries. + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the compute resource requests and limits for the monitoring-plugin container. + // This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. + // When not specified, defaults are used by the platform. 
Requests cannot exceed limits. + // This field is optional. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. + // The current default values are: + // resources: + // - name: cpu + // request: 10m + // - name: memory + // request: 50Mi + // + // When specified, resources must contain at least 1 entry and must not exceed 5 entries. + Resources []ContainerResourceApplyConfiguration `json:"resources,omitempty"` + // tolerations defines the tolerations required for the monitoring-plugin Pods. + // This field is optional. + // + // When omitted, the monitoring-plugin Pods will not have any tolerations, which + // means they will only be scheduled on nodes with no taints. + // When specified, tolerations must contain at least 1 entry and must not contain more than 10 entries. + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // topologySpreadConstraints defines rules for how monitoring-plugin Pods should be distributed + // across topology domains such as zones, nodes, or other user-defined labels. + // topologySpreadConstraints is optional. + // This helps improve high availability and resource efficiency by avoiding placing + // too many replicas in the same failure domain. + // + // When omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. + // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. + // Default is empty list. + // When specified, this list must contain at least 1 entry and must not exceed 10 entries. + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// MonitoringPluginConfigApplyConfiguration constructs a declarative configuration of the MonitoringPluginConfig type for use with +// apply. 
+func MonitoringPluginConfig() *MonitoringPluginConfigApplyConfiguration { + return &MonitoringPluginConfigApplyConfiguration{} +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. +func (b *MonitoringPluginConfigApplyConfiguration) WithNodeSelector(entries map[string]string) *MonitoringPluginConfigApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *MonitoringPluginConfigApplyConfiguration) WithResources(values ...*ContainerResourceApplyConfiguration) *MonitoringPluginConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResources") + } + b.Resources = append(b.Resources, *values[i]) + } + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. 
+func (b *MonitoringPluginConfigApplyConfiguration) WithTolerations(values ...v1.Toleration) *MonitoringPluginConfigApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} + +// WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field. +func (b *MonitoringPluginConfigApplyConfiguration) WithTopologySpreadConstraints(values ...v1.TopologySpreadConstraint) *MonitoringPluginConfigApplyConfiguration { + for i := range values { + b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorbuddyinfoconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorbuddyinfoconfig.go new file mode 100644 index 0000000000..ba6cedbf2a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorbuddyinfoconfig.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// NodeExporterCollectorBuddyInfoConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorBuddyInfoConfig type for use +// with apply. +// +// NodeExporterCollectorBuddyInfoConfig provides configuration for the buddyinfo collector +// of the node-exporter agent. The buddyinfo collector collects statistics about memory fragmentation +// from the node_buddyinfo_blocks metric using data from /proc/buddyinfo. +// It is disabled by default. 
+type NodeExporterCollectorBuddyInfoConfigApplyConfiguration struct { + // collectionPolicy declares whether the buddyinfo collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the buddyinfo collector is active and memory fragmentation statistics are collected. + // When set to "DoNotCollect", the buddyinfo collector is inactive. + CollectionPolicy *configv1alpha1.NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorBuddyInfoConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorBuddyInfoConfig type for use with +// apply. +func NodeExporterCollectorBuddyInfoConfig() *NodeExporterCollectorBuddyInfoConfigApplyConfiguration { + return &NodeExporterCollectorBuddyInfoConfigApplyConfiguration{} +} + +// WithCollectionPolicy sets the CollectionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CollectionPolicy field is set to the value of the last call. +func (b *NodeExporterCollectorBuddyInfoConfigApplyConfiguration) WithCollectionPolicy(value configv1alpha1.NodeExporterCollectorCollectionPolicy) *NodeExporterCollectorBuddyInfoConfigApplyConfiguration { + b.CollectionPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorconfig.go new file mode 100644 index 0000000000..cb1c338042 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorconfig.go @@ -0,0 +1,169 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1alpha1 + +// NodeExporterCollectorConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorConfig type for use +// with apply. +// +// NodeExporterCollectorConfig defines settings for individual collectors +// of the node-exporter agent. Each collector can be individually set to collect or not collect metrics. +// At least one collector must be specified. +type NodeExporterCollectorConfigApplyConfiguration struct { + // cpuFreq configures the cpufreq collector, which collects CPU frequency statistics. + // cpuFreq is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Consider enabling when you need to observe CPU frequency scaling; expect higher CPU usage on + // many-core nodes when collectionPolicy is Collect. + CpuFreq *NodeExporterCollectorCpufreqConfigApplyConfiguration `json:"cpuFreq,omitempty"` + // tcpStat configures the tcpstat collector, which collects TCP connection statistics. + // tcpStat is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Enable when debugging TCP connection behavior or capacity at the node level. + TcpStat *NodeExporterCollectorTcpStatConfigApplyConfiguration `json:"tcpStat,omitempty"` + // ethtool configures the ethtool collector, which collects ethernet device statistics. + // ethtool is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Enable when you need NIC driver-level ethtool metrics beyond generic netdev counters. 
+ Ethtool *NodeExporterCollectorEthtoolConfigApplyConfiguration `json:"ethtool,omitempty"` + // netDev configures the netdev collector, which collects network device statistics. + // netDev is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is enabled. + // Turn off if you must reduce per-interface metric cardinality on hosts with many virtual interfaces. + NetDev *NodeExporterCollectorNetDevConfigApplyConfiguration `json:"netDev,omitempty"` + // netClass configures the netclass collector, which collects information about network devices. + // netClass is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is enabled with netlink mode active. + // Use statsGatherer when sysfs vs netlink implementation matters or when matching node_exporter tuning. + NetClass *NodeExporterCollectorNetClassConfigApplyConfiguration `json:"netClass,omitempty"` + // buddyInfo configures the buddyinfo collector, which collects statistics about memory + // fragmentation from the node_buddyinfo_blocks metric. This metric collects data from /proc/buddyinfo. + // buddyInfo is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Enable when investigating kernel memory fragmentation; typically for advanced troubleshooting only. + BuddyInfo *NodeExporterCollectorBuddyInfoConfigApplyConfiguration `json:"buddyInfo,omitempty"` + // mountStats configures the mountstats collector, which collects statistics about NFS volume + // I/O activities. + // mountStats is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. 
The current default is disabled. + // Enabling this collector may produce metrics with high cardinality. If you enable this + // collector, closely monitor the prometheus-k8s deployment for excessive memory usage. + // Enable when you care about per-mount NFS client statistics. + MountStats *NodeExporterCollectorMountStatsConfigApplyConfiguration `json:"mountStats,omitempty"` + // ksmd configures the ksmd collector, which collects statistics from the kernel same-page + // merger daemon. + // ksmd is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Enable on nodes where KSM is in use and you want visibility into merging activity. + Ksmd *NodeExporterCollectorKSMDConfigApplyConfiguration `json:"ksmd,omitempty"` + // processes configures the processes collector, which collects statistics from processes and + // threads running in the system. + // processes is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Enable for process/thread-level insight; can be expensive on busy nodes. + Processes *NodeExporterCollectorProcessesConfigApplyConfiguration `json:"processes,omitempty"` + // systemd configures the systemd collector, which collects statistics on the systemd daemon + // and its managed services. + // systemd is optional. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is disabled. + // Enabling this collector with a long list of selected units may produce metrics with high + // cardinality. If you enable this collector, closely monitor the prometheus-k8s deployment + // for excessive memory usage. + // Enable when you need metrics for specific units; scope units carefully. 
+ Systemd *NodeExporterCollectorSystemdConfigApplyConfiguration `json:"systemd,omitempty"` +} + +// NodeExporterCollectorConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorConfig type for use with +// apply. +func NodeExporterCollectorConfig() *NodeExporterCollectorConfigApplyConfiguration { + return &NodeExporterCollectorConfigApplyConfiguration{} +} + +// WithCpuFreq sets the CpuFreq field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CpuFreq field is set to the value of the last call. +func (b *NodeExporterCollectorConfigApplyConfiguration) WithCpuFreq(value *NodeExporterCollectorCpufreqConfigApplyConfiguration) *NodeExporterCollectorConfigApplyConfiguration { + b.CpuFreq = value + return b +} + +// WithTcpStat sets the TcpStat field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the TcpStat field is set to the value of the last call. +func (b *NodeExporterCollectorConfigApplyConfiguration) WithTcpStat(value *NodeExporterCollectorTcpStatConfigApplyConfiguration) *NodeExporterCollectorConfigApplyConfiguration { + b.TcpStat = value + return b +} + +// WithEthtool sets the Ethtool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ethtool field is set to the value of the last call. 
+func (b *NodeExporterCollectorConfigApplyConfiguration) WithEthtool(value *NodeExporterCollectorEthtoolConfigApplyConfiguration) *NodeExporterCollectorConfigApplyConfiguration { + b.Ethtool = value + return b +} + +// WithNetDev sets the NetDev field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetDev field is set to the value of the last call. +func (b *NodeExporterCollectorConfigApplyConfiguration) WithNetDev(value *NodeExporterCollectorNetDevConfigApplyConfiguration) *NodeExporterCollectorConfigApplyConfiguration { + b.NetDev = value + return b +} + +// WithNetClass sets the NetClass field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NetClass field is set to the value of the last call. +func (b *NodeExporterCollectorConfigApplyConfiguration) WithNetClass(value *NodeExporterCollectorNetClassConfigApplyConfiguration) *NodeExporterCollectorConfigApplyConfiguration { + b.NetClass = value + return b +} + +// WithBuddyInfo sets the BuddyInfo field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BuddyInfo field is set to the value of the last call. +func (b *NodeExporterCollectorConfigApplyConfiguration) WithBuddyInfo(value *NodeExporterCollectorBuddyInfoConfigApplyConfiguration) *NodeExporterCollectorConfigApplyConfiguration { + b.BuddyInfo = value + return b +} + +// WithMountStats sets the MountStats field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MountStats field is set to the value of the last call. 
+func (b *NodeExporterCollectorConfigApplyConfiguration) WithMountStats(value *NodeExporterCollectorMountStatsConfigApplyConfiguration) *NodeExporterCollectorConfigApplyConfiguration { + b.MountStats = value + return b +} + +// WithKsmd sets the Ksmd field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ksmd field is set to the value of the last call. +func (b *NodeExporterCollectorConfigApplyConfiguration) WithKsmd(value *NodeExporterCollectorKSMDConfigApplyConfiguration) *NodeExporterCollectorConfigApplyConfiguration { + b.Ksmd = value + return b +} + +// WithProcesses sets the Processes field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Processes field is set to the value of the last call. +func (b *NodeExporterCollectorConfigApplyConfiguration) WithProcesses(value *NodeExporterCollectorProcessesConfigApplyConfiguration) *NodeExporterCollectorConfigApplyConfiguration { + b.Processes = value + return b +} + +// WithSystemd sets the Systemd field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Systemd field is set to the value of the last call. 
+func (b *NodeExporterCollectorConfigApplyConfiguration) WithSystemd(value *NodeExporterCollectorSystemdConfigApplyConfiguration) *NodeExporterCollectorConfigApplyConfiguration { + b.Systemd = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorcpufreqconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorcpufreqconfig.go new file mode 100644 index 0000000000..65fe3f11fe --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorcpufreqconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// NodeExporterCollectorCpufreqConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorCpufreqConfig type for use +// with apply. +// +// NodeExporterCollectorCpufreqConfig provides configuration for the cpufreq collector +// of the node-exporter agent. The cpufreq collector collects CPU frequency statistics. +// It is disabled by default. +type NodeExporterCollectorCpufreqConfigApplyConfiguration struct { + // collectionPolicy declares whether the cpufreq collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the cpufreq collector is active and CPU frequency statistics are collected. + // When set to "DoNotCollect", the cpufreq collector is inactive. + CollectionPolicy *configv1alpha1.NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorCpufreqConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorCpufreqConfig type for use with +// apply. 
+func NodeExporterCollectorCpufreqConfig() *NodeExporterCollectorCpufreqConfigApplyConfiguration { + return &NodeExporterCollectorCpufreqConfigApplyConfiguration{} +} + +// WithCollectionPolicy sets the CollectionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CollectionPolicy field is set to the value of the last call. +func (b *NodeExporterCollectorCpufreqConfigApplyConfiguration) WithCollectionPolicy(value configv1alpha1.NodeExporterCollectorCollectionPolicy) *NodeExporterCollectorCpufreqConfigApplyConfiguration { + b.CollectionPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorethtoolconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorethtoolconfig.go new file mode 100644 index 0000000000..396477c1f0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorethtoolconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// NodeExporterCollectorEthtoolConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorEthtoolConfig type for use +// with apply. +// +// NodeExporterCollectorEthtoolConfig provides configuration for the ethtool collector +// of the node-exporter agent. The ethtool collector collects ethernet device statistics. +// It is disabled by default. +type NodeExporterCollectorEthtoolConfigApplyConfiguration struct { + // collectionPolicy declares whether the ethtool collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". 
+ // When set to "Collect", the ethtool collector is active and ethernet device statistics are collected. + // When set to "DoNotCollect", the ethtool collector is inactive. + CollectionPolicy *configv1alpha1.NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorEthtoolConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorEthtoolConfig type for use with +// apply. +func NodeExporterCollectorEthtoolConfig() *NodeExporterCollectorEthtoolConfigApplyConfiguration { + return &NodeExporterCollectorEthtoolConfigApplyConfiguration{} +} + +// WithCollectionPolicy sets the CollectionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CollectionPolicy field is set to the value of the last call. +func (b *NodeExporterCollectorEthtoolConfigApplyConfiguration) WithCollectionPolicy(value configv1alpha1.NodeExporterCollectorCollectionPolicy) *NodeExporterCollectorEthtoolConfigApplyConfiguration { + b.CollectionPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorksmdconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorksmdconfig.go new file mode 100644 index 0000000000..fc0ac015a1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorksmdconfig.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// NodeExporterCollectorKSMDConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorKSMDConfig type for use +// with apply. 
+// +// NodeExporterCollectorKSMDConfig provides configuration for the ksmd collector +// of the node-exporter agent. The ksmd collector collects statistics from the kernel +// same-page merger daemon. +// It is disabled by default. +type NodeExporterCollectorKSMDConfigApplyConfiguration struct { + // collectionPolicy declares whether the ksmd collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the ksmd collector is active and kernel same-page merger statistics are collected. + // When set to "DoNotCollect", the ksmd collector is inactive. + CollectionPolicy *configv1alpha1.NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorKSMDConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorKSMDConfig type for use with +// apply. +func NodeExporterCollectorKSMDConfig() *NodeExporterCollectorKSMDConfigApplyConfiguration { + return &NodeExporterCollectorKSMDConfigApplyConfiguration{} +} + +// WithCollectionPolicy sets the CollectionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CollectionPolicy field is set to the value of the last call. 
+func (b *NodeExporterCollectorKSMDConfigApplyConfiguration) WithCollectionPolicy(value configv1alpha1.NodeExporterCollectorCollectionPolicy) *NodeExporterCollectorKSMDConfigApplyConfiguration { + b.CollectionPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectormountstatsconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectormountstatsconfig.go new file mode 100644 index 0000000000..306bb851aa --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectormountstatsconfig.go @@ -0,0 +1,38 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// NodeExporterCollectorMountStatsConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorMountStatsConfig type for use +// with apply. +// +// NodeExporterCollectorMountStatsConfig provides configuration for the mountstats collector +// of the node-exporter agent. The mountstats collector collects statistics about NFS volume I/O activities. +// It is disabled by default. +// Enabling this collector may produce metrics with high cardinality. If you enable this +// collector, closely monitor the prometheus-k8s deployment for excessive memory usage. +type NodeExporterCollectorMountStatsConfigApplyConfiguration struct { + // collectionPolicy declares whether the mountstats collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the mountstats collector is active and NFS volume I/O statistics are collected. + // When set to "DoNotCollect", the mountstats collector is inactive. 
+ CollectionPolicy *configv1alpha1.NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorMountStatsConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorMountStatsConfig type for use with +// apply. +func NodeExporterCollectorMountStatsConfig() *NodeExporterCollectorMountStatsConfigApplyConfiguration { + return &NodeExporterCollectorMountStatsConfigApplyConfiguration{} +} + +// WithCollectionPolicy sets the CollectionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CollectionPolicy field is set to the value of the last call. +func (b *NodeExporterCollectorMountStatsConfigApplyConfiguration) WithCollectionPolicy(value configv1alpha1.NodeExporterCollectorCollectionPolicy) *NodeExporterCollectorMountStatsConfigApplyConfiguration { + b.CollectionPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectornetclasscollectconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectornetclasscollectconfig.go new file mode 100644 index 0000000000..321c7c6679 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectornetclasscollectconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// NodeExporterCollectorNetClassCollectConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorNetClassCollectConfig type for use +// with apply. +// +// NodeExporterCollectorNetClassCollectConfig holds configuration options for the netclass collector +// when it is actively collecting metrics. 
At least one field must be specified. +type NodeExporterCollectorNetClassCollectConfigApplyConfiguration struct { + // statsGatherer selects which implementation the netclass collector uses to gather statistics (sysfs or netlink). + // statsGatherer is optional. + // Valid values are "Sysfs" and "Netlink". + // When set to "Netlink", the netlink implementation is used; when set to "Sysfs", the sysfs implementation is used. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. The current default is Netlink. + StatsGatherer *configv1alpha1.NodeExporterNetclassStatsGatherer `json:"statsGatherer,omitempty"` +} + +// NodeExporterCollectorNetClassCollectConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorNetClassCollectConfig type for use with +// apply. +func NodeExporterCollectorNetClassCollectConfig() *NodeExporterCollectorNetClassCollectConfigApplyConfiguration { + return &NodeExporterCollectorNetClassCollectConfigApplyConfiguration{} +} + +// WithStatsGatherer sets the StatsGatherer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StatsGatherer field is set to the value of the last call. 
+func (b *NodeExporterCollectorNetClassCollectConfigApplyConfiguration) WithStatsGatherer(value configv1alpha1.NodeExporterNetclassStatsGatherer) *NodeExporterCollectorNetClassCollectConfigApplyConfiguration { + b.StatsGatherer = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectornetclassconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectornetclassconfig.go new file mode 100644 index 0000000000..2fe2a6a5ba --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectornetclassconfig.go @@ -0,0 +1,53 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// NodeExporterCollectorNetClassConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorNetClassConfig type for use +// with apply. +// +// NodeExporterCollectorNetClassConfig provides configuration for the netclass collector +// of the node-exporter agent. The netclass collector collects information about network devices +// such as network speed, MTU, and carrier status. +// It is enabled by default. +// When collectionPolicy is DoNotCollect, the collect field must not be set. +type NodeExporterCollectorNetClassConfigApplyConfiguration struct { + // collectionPolicy declares whether the netclass collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the netclass collector is active and network class information is collected. + // When set to "DoNotCollect", the netclass collector is inactive and the corresponding metrics become unavailable. + // When set to "DoNotCollect", the collect field must not be set. 
+ CollectionPolicy *configv1alpha1.NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` + // collect contains configuration options that apply only when the netclass collector is actively collecting metrics + // (i.e. when collectionPolicy is Collect). + // collect is optional and may be omitted even when collectionPolicy is Collect. + // collect may only be set when collectionPolicy is Collect. + // When set, at least one field must be specified within collect. + Collect *NodeExporterCollectorNetClassCollectConfigApplyConfiguration `json:"collect,omitempty"` +} + +// NodeExporterCollectorNetClassConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorNetClassConfig type for use with +// apply. +func NodeExporterCollectorNetClassConfig() *NodeExporterCollectorNetClassConfigApplyConfiguration { + return &NodeExporterCollectorNetClassConfigApplyConfiguration{} +} + +// WithCollectionPolicy sets the CollectionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CollectionPolicy field is set to the value of the last call. +func (b *NodeExporterCollectorNetClassConfigApplyConfiguration) WithCollectionPolicy(value configv1alpha1.NodeExporterCollectorCollectionPolicy) *NodeExporterCollectorNetClassConfigApplyConfiguration { + b.CollectionPolicy = &value + return b +} + +// WithCollect sets the Collect field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Collect field is set to the value of the last call. 
+func (b *NodeExporterCollectorNetClassConfigApplyConfiguration) WithCollect(value *NodeExporterCollectorNetClassCollectConfigApplyConfiguration) *NodeExporterCollectorNetClassConfigApplyConfiguration { + b.Collect = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectornetdevconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectornetdevconfig.go new file mode 100644 index 0000000000..b5bbe4c86e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectornetdevconfig.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// NodeExporterCollectorNetDevConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorNetDevConfig type for use +// with apply. +// +// NodeExporterCollectorNetDevConfig provides configuration for the netdev collector +// of the node-exporter agent. The netdev collector collects network device statistics +// such as bytes, packets, errors, and drops per device. +// It is enabled by default. +type NodeExporterCollectorNetDevConfigApplyConfiguration struct { + // collectionPolicy declares whether the netdev collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the netdev collector is active and network device statistics are collected. + // When set to "DoNotCollect", the netdev collector is inactive and the corresponding metrics become unavailable. 
+ CollectionPolicy *configv1alpha1.NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorNetDevConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorNetDevConfig type for use with +// apply. +func NodeExporterCollectorNetDevConfig() *NodeExporterCollectorNetDevConfigApplyConfiguration { + return &NodeExporterCollectorNetDevConfigApplyConfiguration{} +} + +// WithCollectionPolicy sets the CollectionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CollectionPolicy field is set to the value of the last call. +func (b *NodeExporterCollectorNetDevConfigApplyConfiguration) WithCollectionPolicy(value configv1alpha1.NodeExporterCollectorCollectionPolicy) *NodeExporterCollectorNetDevConfigApplyConfiguration { + b.CollectionPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorprocessesconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorprocessesconfig.go new file mode 100644 index 0000000000..71cf2fb59c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorprocessesconfig.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// NodeExporterCollectorProcessesConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorProcessesConfig type for use +// with apply. +// +// NodeExporterCollectorProcessesConfig provides configuration for the processes collector +// of the node-exporter agent. 
The processes collector collects statistics from processes and threads +// running in the system. +// It is disabled by default. +type NodeExporterCollectorProcessesConfigApplyConfiguration struct { + // collectionPolicy declares whether the processes collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the processes collector is active and process/thread statistics are collected. + // When set to "DoNotCollect", the processes collector is inactive. + CollectionPolicy *configv1alpha1.NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorProcessesConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorProcessesConfig type for use with +// apply. +func NodeExporterCollectorProcessesConfig() *NodeExporterCollectorProcessesConfigApplyConfiguration { + return &NodeExporterCollectorProcessesConfigApplyConfiguration{} +} + +// WithCollectionPolicy sets the CollectionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CollectionPolicy field is set to the value of the last call. 
+func (b *NodeExporterCollectorProcessesConfigApplyConfiguration) WithCollectionPolicy(value configv1alpha1.NodeExporterCollectorCollectionPolicy) *NodeExporterCollectorProcessesConfigApplyConfiguration { + b.CollectionPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorsystemdcollectconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorsystemdcollectconfig.go new file mode 100644 index 0000000000..647f7efc0a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorsystemdcollectconfig.go @@ -0,0 +1,40 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// NodeExporterCollectorSystemdCollectConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorSystemdCollectConfig type for use +// with apply. +// +// NodeExporterCollectorSystemdCollectConfig holds configuration options for the systemd collector +// when it is actively collecting metrics. At least one field must be specified. +type NodeExporterCollectorSystemdCollectConfigApplyConfiguration struct { + // units is a list of regular expression patterns that match systemd units to be included + // by the systemd collector. + // units is optional. + // By default, the list is empty, so the collector exposes no metrics for systemd units. + // Each entry is a regular expression pattern and must be at least 1 character and at most 1024 characters. + // Maximum length for this list is 50. + // Minimum length for this list is 1. + // Entries in this list must be unique. 
+ Units []configv1alpha1.NodeExporterSystemdUnit `json:"units,omitempty"` +} + +// NodeExporterCollectorSystemdCollectConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorSystemdCollectConfig type for use with +// apply. +func NodeExporterCollectorSystemdCollectConfig() *NodeExporterCollectorSystemdCollectConfigApplyConfiguration { + return &NodeExporterCollectorSystemdCollectConfigApplyConfiguration{} +} + +// WithUnits adds the given value to the Units field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Units field. +func (b *NodeExporterCollectorSystemdCollectConfigApplyConfiguration) WithUnits(values ...configv1alpha1.NodeExporterSystemdUnit) *NodeExporterCollectorSystemdCollectConfigApplyConfiguration { + for i := range values { + b.Units = append(b.Units, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorsystemdconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorsystemdconfig.go new file mode 100644 index 0000000000..a1422798de --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectorsystemdconfig.go @@ -0,0 +1,55 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// NodeExporterCollectorSystemdConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorSystemdConfig type for use +// with apply. +// +// NodeExporterCollectorSystemdConfig provides configuration for the systemd collector +// of the node-exporter agent. 
The systemd collector collects statistics on the systemd daemon +// and its managed services. +// It is disabled by default. +// Enabling this collector with a long list of selected units may produce metrics with high +// cardinality. If you enable this collector, closely monitor the prometheus-k8s deployment +// for excessive memory usage. +// When collectionPolicy is DoNotCollect, the collect field must not be set. +type NodeExporterCollectorSystemdConfigApplyConfiguration struct { + // collectionPolicy declares whether the systemd collector collects metrics. + // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the systemd collector is active and systemd unit statistics are collected. + // When set to "DoNotCollect", the systemd collector is inactive and the collect field must not be set. + CollectionPolicy *configv1alpha1.NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` + // collect contains configuration options that apply only when the systemd collector is actively collecting metrics + // (i.e. when collectionPolicy is Collect). + // collect is optional and may be omitted even when collectionPolicy is Collect. + // collect may only be set when collectionPolicy is Collect. + // When set, at least one field must be specified within collect. + Collect *NodeExporterCollectorSystemdCollectConfigApplyConfiguration `json:"collect,omitempty"` +} + +// NodeExporterCollectorSystemdConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorSystemdConfig type for use with +// apply. +func NodeExporterCollectorSystemdConfig() *NodeExporterCollectorSystemdConfigApplyConfiguration { + return &NodeExporterCollectorSystemdConfigApplyConfiguration{} +} + +// WithCollectionPolicy sets the CollectionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the CollectionPolicy field is set to the value of the last call. +func (b *NodeExporterCollectorSystemdConfigApplyConfiguration) WithCollectionPolicy(value configv1alpha1.NodeExporterCollectorCollectionPolicy) *NodeExporterCollectorSystemdConfigApplyConfiguration { + b.CollectionPolicy = &value + return b +} + +// WithCollect sets the Collect field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Collect field is set to the value of the last call. +func (b *NodeExporterCollectorSystemdConfigApplyConfiguration) WithCollect(value *NodeExporterCollectorSystemdCollectConfigApplyConfiguration) *NodeExporterCollectorSystemdConfigApplyConfiguration { + b.Collect = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectortcpstatconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectortcpstatconfig.go new file mode 100644 index 0000000000..20f77e8808 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexportercollectortcpstatconfig.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// NodeExporterCollectorTcpStatConfigApplyConfiguration represents a declarative configuration of the NodeExporterCollectorTcpStatConfig type for use +// with apply. +// +// NodeExporterCollectorTcpStatConfig provides configuration for the tcpstat collector +// of the node-exporter agent. The tcpstat collector collects TCP connection statistics. +// It is disabled by default. +type NodeExporterCollectorTcpStatConfigApplyConfiguration struct { + // collectionPolicy declares whether the tcpstat collector collects metrics. 
+ // This field is required. + // Valid values are "Collect" and "DoNotCollect". + // When set to "Collect", the tcpstat collector is active and TCP connection statistics are collected. + // When set to "DoNotCollect", the tcpstat collector is inactive. + CollectionPolicy *configv1alpha1.NodeExporterCollectorCollectionPolicy `json:"collectionPolicy,omitempty"` +} + +// NodeExporterCollectorTcpStatConfigApplyConfiguration constructs a declarative configuration of the NodeExporterCollectorTcpStatConfig type for use with +// apply. +func NodeExporterCollectorTcpStatConfig() *NodeExporterCollectorTcpStatConfigApplyConfiguration { + return &NodeExporterCollectorTcpStatConfigApplyConfiguration{} +} + +// WithCollectionPolicy sets the CollectionPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CollectionPolicy field is set to the value of the last call. +func (b *NodeExporterCollectorTcpStatConfigApplyConfiguration) WithCollectionPolicy(value configv1alpha1.NodeExporterCollectorCollectionPolicy) *NodeExporterCollectorTcpStatConfigApplyConfiguration { + b.CollectionPolicy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexporterconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexporterconfig.go new file mode 100644 index 0000000000..8c6a288f50 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/nodeexporterconfig.go @@ -0,0 +1,158 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + v1 "k8s.io/api/core/v1" +) + +// NodeExporterConfigApplyConfiguration represents a declarative configuration of the NodeExporterConfig type for use +// with apply. 
+// +// NodeExporterConfig provides configuration options for the node-exporter agent +// that runs as a DaemonSet in the `openshift-monitoring` namespace. The node-exporter agent collects +// hardware and OS-level metrics from every node in the cluster, including CPU, memory, disk, and +// network statistics. +// At least one field must be specified. +type NodeExporterConfigApplyConfiguration struct { + // nodeSelector defines the nodes on which the Pods are scheduled. + // nodeSelector is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default value is `kubernetes.io/os: linux`. + // When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries. + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the compute resource requests and limits for the node-exporter container. + // This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. + // When not specified, defaults are used by the platform. Requests cannot exceed limits. + // This field is optional. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. + // The current default values are: + // resources: + // - name: cpu + // request: 8m + // limit: null + // - name: memory + // request: 32Mi + // limit: null + // --- + // maxItems is set to 5 to stay within the Kubernetes CRD CEL validation cost budget. + // See the MaxItems comment near the ContainerResource type definition for details. + // Minimum length for this list is 1. + // Each resource name must be unique within this list. + Resources []ContainerResourceApplyConfiguration `json:"resources,omitempty"` + // tolerations defines tolerations for the pods. + // tolerations is optional. 
+ // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default is to tolerate all taints (operator: Exists without any key), + // which is typical for DaemonSets that must run on every node. + // Maximum length for this list is 10. + // Minimum length for this list is 1. + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // collectors configures which node-exporter metric collectors are enabled. + // collectors is optional. + // Each collector can be individually enabled or disabled. Some collectors may have + // additional configuration options. + // + // When omitted, this means no opinion and the platform is left to choose a reasonable + // default, which is subject to change over time. + Collectors *NodeExporterCollectorConfigApplyConfiguration `json:"collectors,omitempty"` + // maxProcs sets the target number of CPUs on which the node-exporter process will run. + // maxProcs is optional. + // Use this setting to override the default value, which is set either to 4 or to the number + // of CPUs on the host, whichever is smaller. + // The default value is computed at runtime and set via the GOMAXPROCS environment variable before + // node-exporter is launched. + // If a kernel deadlock occurs or if performance degrades when reading from sysfs concurrently, + // you can change this value to 1, which limits node-exporter to running on one CPU. + // For nodes with a high CPU count, setting the limit to a low number saves resources by preventing + // Go routines from being scheduled to run on all CPUs. However, I/O performance degrades if the + // maxProcs value is set too low and there are many metrics to collect. + // The minimum value is 1 and the maximum value is 1024. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, + // which is subject to change over time. 
The current default is min(4, number of host CPUs). + MaxProcs *int32 `json:"maxProcs,omitempty"` + // ignoredNetworkDevices is a list of regular expression patterns that match network devices + // to be excluded from the relevant collector configuration such as netdev, netclass, and ethtool. + // ignoredNetworkDevices is optional. + // + // When omitted, the Cluster Monitoring Operator uses a predefined list of devices to be excluded + // to minimize the impact on memory usage. + // When set as an empty list, no devices are excluded. + // If you modify this setting, monitor the prometheus-k8s deployment closely for excessive memory usage. + // Maximum length for this list is 50. + // Each entry must be at least 1 character and at most 1024 characters long. + IgnoredNetworkDevices *[]configv1alpha1.NodeExporterIgnoredNetworkDevice `json:"ignoredNetworkDevices,omitempty"` +} + +// NodeExporterConfigApplyConfiguration constructs a declarative configuration of the NodeExporterConfig type for use with +// apply. +func NodeExporterConfig() *NodeExporterConfigApplyConfiguration { + return &NodeExporterConfigApplyConfiguration{} +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. 
+func (b *NodeExporterConfigApplyConfiguration) WithNodeSelector(entries map[string]string) *NodeExporterConfigApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *NodeExporterConfigApplyConfiguration) WithResources(values ...*ContainerResourceApplyConfiguration) *NodeExporterConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResources") + } + b.Resources = append(b.Resources, *values[i]) + } + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *NodeExporterConfigApplyConfiguration) WithTolerations(values ...v1.Toleration) *NodeExporterConfigApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} + +// WithCollectors sets the Collectors field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Collectors field is set to the value of the last call. 
+func (b *NodeExporterConfigApplyConfiguration) WithCollectors(value *NodeExporterCollectorConfigApplyConfiguration) *NodeExporterConfigApplyConfiguration { + b.Collectors = value + return b +} + +// WithMaxProcs sets the MaxProcs field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxProcs field is set to the value of the last call. +func (b *NodeExporterConfigApplyConfiguration) WithMaxProcs(value int32) *NodeExporterConfigApplyConfiguration { + b.MaxProcs = &value + return b +} + +// WithIgnoredNetworkDevices sets the IgnoredNetworkDevices field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IgnoredNetworkDevices field is set to the value of the last call. +func (b *NodeExporterConfigApplyConfiguration) WithIgnoredNetworkDevices(value []configv1alpha1.NodeExporterIgnoredNetworkDevice) *NodeExporterConfigApplyConfiguration { + b.IgnoredNetworkDevices = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/openshiftstatemetricsconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/openshiftstatemetricsconfig.go index 045ef78730..daef85c244 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/openshiftstatemetricsconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/openshiftstatemetricsconfig.go @@ -35,7 +35,7 @@ type OpenShiftStateMetricsConfigApplyConfiguration struct { // - name: memory // request: 32Mi // limit: null - // Maximum length for this list is 10. + // Maximum length for this list is 5. // Minimum length for this list is 1. // Each resource name must be unique within this list. 
Resources []ContainerResourceApplyConfiguration `json:"resources,omitempty"` diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/prometheusconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/prometheusconfig.go index cd8fcb780b..31d3b9f58e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/prometheusconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/prometheusconfig.go @@ -85,14 +85,20 @@ type PrometheusConfigApplyConfiguration struct { // resources defines the compute resource requests and limits for the Prometheus container. // This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. // When not specified, defaults are used by the platform. Requests cannot exceed limits. - // Each entry must have a unique resource name. - // Minimum of 1 and maximum of 10 resource entries can be specified. + // This field is optional. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. // The current default values are: // resources: // - name: cpu // request: 4m + // limit: null // - name: memory // request: 40Mi + // limit: null + // Maximum length for this list is 5. + // Minimum length for this list is 1. + // Each resource name must be unique within this list. Resources []ContainerResourceApplyConfiguration `json:"resources,omitempty"` // retention configures how long Prometheus retains metrics data and how much storage it can use. // When omitted, the platform chooses reasonable defaults (currently 15 days retention, no size limit). 
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/prometheusoperatoradmissionwebhookconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/prometheusoperatoradmissionwebhookconfig.go index b35fb15fef..9eadb023ec 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/prometheusoperatoradmissionwebhookconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/prometheusoperatoradmissionwebhookconfig.go @@ -29,7 +29,7 @@ type PrometheusOperatorAdmissionWebhookConfigApplyConfiguration struct { // - name: memory // request: 30Mi // limit: null - // Maximum length for this list is 10. + // Maximum length for this list is 5. // Minimum length for this list is 1. // Each resource name must be unique within this list. Resources []ContainerResourceApplyConfiguration `json:"resources,omitempty"` diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/prometheusoperatorconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/prometheusoperatorconfig.go index 2612926255..a0bac703d0 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/prometheusoperatorconfig.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/prometheusoperatorconfig.go @@ -46,7 +46,7 @@ type PrometheusOperatorConfigApplyConfiguration struct { // - name: memory // request: 40Mi // limit: null - // Maximum length for this list is 10. + // Maximum length for this list is 5. // Minimum length for this list is 1. // Each resource name must be unique within this list. 
Resources []ContainerResourceApplyConfiguration `json:"resources,omitempty"` diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/telemeterclientconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/telemeterclientconfig.go new file mode 100644 index 0000000000..9d4c5cc331 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/telemeterclientconfig.go @@ -0,0 +1,118 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// TelemeterClientConfigApplyConfiguration represents a declarative configuration of the TelemeterClientConfig type for use +// with apply. +// +// TelemeterClientConfig provides configuration options for the Telemeter Client component +// that runs in the `openshift-monitoring` namespace. The Telemeter Client collects selected +// monitoring metrics and forwards them to Red Hat for telemetry purposes. +// At least one field must be specified. +type TelemeterClientConfigApplyConfiguration struct { + // nodeSelector defines the nodes on which the Pods are scheduled. + // nodeSelector is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default value is `kubernetes.io/os: linux`. + // When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries. + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the compute resource requests and limits for the Telemeter Client container. + // This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. + // When not specified, defaults are used by the platform. Requests cannot exceed limits. + // This field is optional. 
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. + // The current default values are: + // resources: + // - name: cpu + // request: 1m + // limit: null + // - name: memory + // request: 40Mi + // limit: null + // Maximum length for this list is 5. + // Minimum length for this list is 1. + // Each resource name must be unique within this list. + Resources []ContainerResourceApplyConfiguration `json:"resources,omitempty"` + // tolerations defines tolerations for the pods. + // tolerations is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // Defaults are empty/unset. + // Maximum length for this list is 10. + // Minimum length for this list is 1. + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // topologySpreadConstraints defines rules for how Telemeter Client Pods should be distributed + // across topology domains such as zones, nodes, or other user-defined labels. + // topologySpreadConstraints is optional. + // This helps improve high availability and resource efficiency by avoiding placing + // too many replicas in the same failure domain. + // + // When omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. + // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. + // Default is empty list. + // Maximum length for this list is 10. + // Minimum length for this list is 1. + // Entries must have unique topologyKey and whenUnsatisfiable pairs. + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// TelemeterClientConfigApplyConfiguration constructs a declarative configuration of the TelemeterClientConfig type for use with +// apply. 
+func TelemeterClientConfig() *TelemeterClientConfigApplyConfiguration { + return &TelemeterClientConfigApplyConfiguration{} +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. +func (b *TelemeterClientConfigApplyConfiguration) WithNodeSelector(entries map[string]string) *TelemeterClientConfigApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. +func (b *TelemeterClientConfigApplyConfiguration) WithResources(values ...*ContainerResourceApplyConfiguration) *TelemeterClientConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResources") + } + b.Resources = append(b.Resources, *values[i]) + } + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. 
+func (b *TelemeterClientConfigApplyConfiguration) WithTolerations(values ...v1.Toleration) *TelemeterClientConfigApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} + +// WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field. +func (b *TelemeterClientConfigApplyConfiguration) WithTopologySpreadConstraints(values ...v1.TopologySpreadConstraint) *TelemeterClientConfigApplyConfiguration { + for i := range values { + b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/thanosquerierconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/thanosquerierconfig.go new file mode 100644 index 0000000000..f2fda246e1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/thanosquerierconfig.go @@ -0,0 +1,117 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ThanosQuerierConfigApplyConfiguration represents a declarative configuration of the ThanosQuerierConfig type for use +// with apply. +// +// ThanosQuerierConfig provides configuration options for the Thanos Querier component +// that runs in the `openshift-monitoring` namespace. +// At least one field must be specified; an empty thanosQuerierConfig object is not allowed. +type ThanosQuerierConfigApplyConfiguration struct { + // nodeSelector defines the nodes on which the Pods are scheduled. + // nodeSelector is optional. 
+ // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // The current default value is `kubernetes.io/os: linux`. + // When specified, nodeSelector must contain at least 1 entry and must not contain more than 10 entries. + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // resources defines the compute resource requests and limits for the Thanos Querier container. + // resources is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // Requests cannot exceed limits. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // This is a simplified API that maps to Kubernetes ResourceRequirements. + // The current default values are: + // resources: + // - name: cpu + // request: 5m + // - name: memory + // request: 12Mi + // Maximum length for this list is 5. + // Minimum length for this list is 1. + // Each resource name must be unique within this list. + Resources []ContainerResourceApplyConfiguration `json:"resources,omitempty"` + // tolerations defines tolerations for the pods. + // tolerations is optional. + // + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // Defaults are empty/unset. + // Maximum length for this list is 10. + // Minimum length for this list is 1. + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + // topologySpreadConstraints defines rules for how Thanos Querier Pods should be distributed + // across topology domains such as zones, nodes, or other user-defined labels. + // topologySpreadConstraints is optional. 
+ // This helps improve high availability and resource efficiency by avoiding placing + // too many replicas in the same failure domain. + // + // When omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. + // This field maps directly to the `topologySpreadConstraints` field in the Pod spec. + // Defaults are empty/unset. + // Maximum length for this list is 10. + // Minimum length for this list is 1. + // Entries must have unique topologyKey and whenUnsatisfiable pairs. + TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` +} + +// ThanosQuerierConfigApplyConfiguration constructs a declarative configuration of the ThanosQuerierConfig type for use with +// apply. +func ThanosQuerierConfig() *ThanosQuerierConfigApplyConfiguration { + return &ThanosQuerierConfigApplyConfiguration{} +} + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. +func (b *ThanosQuerierConfigApplyConfiguration) WithNodeSelector(entries map[string]string) *ThanosQuerierConfigApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} + +// WithResources adds the given value to the Resources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Resources field. 
+func (b *ThanosQuerierConfigApplyConfiguration) WithResources(values ...*ContainerResourceApplyConfiguration) *ThanosQuerierConfigApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResources") + } + b.Resources = append(b.Resources, *values[i]) + } + return b +} + +// WithTolerations adds the given value to the Tolerations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tolerations field. +func (b *ThanosQuerierConfigApplyConfiguration) WithTolerations(values ...v1.Toleration) *ThanosQuerierConfigApplyConfiguration { + for i := range values { + b.Tolerations = append(b.Tolerations, values[i]) + } + return b +} + +// WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field. 
+func (b *ThanosQuerierConfigApplyConfiguration) WithTopologySpreadConstraints(values ...v1.TopologySpreadConstraint) *ThanosQuerierConfigApplyConfiguration { + for i := range values { + b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go index 199221a076..2021ad596e 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go @@ -668,6 +668,9 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1.APIServerServingCerts default: {} + - name: tlsAdherence + type: + scalar: string - name: tlsSecurityProfile type: namedType: com.github.openshift.api.config.v1.TLSSecurityProfile @@ -699,17 +702,6 @@ var schemaYAML = typed.YAMLObject(`types: default: "" unions: - discriminator: type -- name: com.github.openshift.api.config.v1.AWSKMSConfig - map: - fields: - - name: keyARN - type: - scalar: string - default: "" - - name: region - type: - scalar: string - default: "" - name: com.github.openshift.api.config.v1.AWSPlatformSpec map: fields: @@ -2718,18 +2710,19 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.config.v1.KMSConfig map: fields: - - name: aws - type: - namedType: com.github.openshift.api.config.v1.AWSKMSConfig - name: type type: scalar: string default: "" + - name: vault + type: + namedType: com.github.openshift.api.config.v1.VaultKMSConfig + default: {} unions: - discriminator: type fields: - - fieldName: aws - discriminatorValue: AWS + - fieldName: vault + discriminatorValue: Vault - name: com.github.openshift.api.config.v1.KeystoneIdentityProvider map: fields: @@ -4685,6 +4678,76 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: 
string default: "" +- name: com.github.openshift.api.config.v1.VaultAppRoleAuthentication + map: + fields: + - name: secret + type: + namedType: com.github.openshift.api.config.v1.VaultSecretReference + default: {} +- name: com.github.openshift.api.config.v1.VaultAuthentication + map: + fields: + - name: appRole + type: + namedType: com.github.openshift.api.config.v1.VaultAppRoleAuthentication + default: {} + - name: type + type: + scalar: string + unions: + - discriminator: type + fields: + - fieldName: appRole + discriminatorValue: AppRole +- name: com.github.openshift.api.config.v1.VaultConfigMapReference + map: + fields: + - name: name + type: + scalar: string +- name: com.github.openshift.api.config.v1.VaultKMSConfig + map: + fields: + - name: authentication + type: + namedType: com.github.openshift.api.config.v1.VaultAuthentication + default: {} + - name: kmsPluginImage + type: + scalar: string + - name: tls + type: + namedType: com.github.openshift.api.config.v1.VaultTLSConfig + default: {} + - name: transitKey + type: + scalar: string + - name: transitMount + type: + scalar: string + - name: vaultAddress + type: + scalar: string + - name: vaultNamespace + type: + scalar: string +- name: com.github.openshift.api.config.v1.VaultSecretReference + map: + fields: + - name: name + type: + scalar: string +- name: com.github.openshift.api.config.v1.VaultTLSConfig + map: + fields: + - name: caBundle + type: + namedType: com.github.openshift.api.config.v1.VaultConfigMapReference + default: {} + - name: serverName + type: + scalar: string - name: com.github.openshift.api.config.v1.WebhookTokenAuthenticator map: fields: @@ -4925,6 +4988,14 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1alpha1.MetricsServerConfig default: {} + - name: monitoringPluginConfig + type: + namedType: com.github.openshift.api.config.v1alpha1.MonitoringPluginConfig + default: {} + - name: nodeExporterConfig + type: + namedType: 
com.github.openshift.api.config.v1alpha1.NodeExporterConfig + default: {} - name: openShiftStateMetricsConfig type: namedType: com.github.openshift.api.config.v1alpha1.OpenShiftStateMetricsConfig @@ -4941,6 +5012,14 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1alpha1.PrometheusOperatorConfig default: {} + - name: telemeterClientConfig + type: + namedType: com.github.openshift.api.config.v1alpha1.TelemeterClientConfig + default: {} + - name: thanosQuerierConfig + type: + namedType: com.github.openshift.api.config.v1alpha1.ThanosQuerierConfig + default: {} - name: userDefined type: namedType: com.github.openshift.api.config.v1alpha1.UserDefinedMonitoring @@ -5192,6 +5271,208 @@ var schemaYAML = typed.YAMLObject(`types: - name: verbosity type: scalar: string +- name: com.github.openshift.api.config.v1alpha1.MonitoringPluginConfig + map: + fields: + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: resources + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1alpha1.ContainerResource + elementRelationship: associative + keys: + - name + - name: tolerations + type: + list: + elementType: + namedType: Toleration.v1.core.api.k8s.io + elementRelationship: atomic + - name: topologySpreadConstraints + type: + list: + elementType: + namedType: TopologySpreadConstraint.v1.core.api.k8s.io + elementRelationship: associative + keys: + - topologyKey + - whenUnsatisfiable +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorBuddyInfoConfig + map: + fields: + - name: collectionPolicy + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorConfig + map: + fields: + - name: buddyInfo + type: + namedType: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorBuddyInfoConfig + default: {} + - name: cpuFreq + type: + namedType: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorCpufreqConfig + default: {} + - 
name: ethtool + type: + namedType: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorEthtoolConfig + default: {} + - name: ksmd + type: + namedType: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorKSMDConfig + default: {} + - name: mountStats + type: + namedType: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorMountStatsConfig + default: {} + - name: netClass + type: + namedType: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorNetClassConfig + default: {} + - name: netDev + type: + namedType: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorNetDevConfig + default: {} + - name: processes + type: + namedType: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorProcessesConfig + default: {} + - name: systemd + type: + namedType: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorSystemdConfig + default: {} + - name: tcpStat + type: + namedType: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorTcpStatConfig + default: {} +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorCpufreqConfig + map: + fields: + - name: collectionPolicy + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorEthtoolConfig + map: + fields: + - name: collectionPolicy + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorKSMDConfig + map: + fields: + - name: collectionPolicy + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorMountStatsConfig + map: + fields: + - name: collectionPolicy + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorNetClassCollectConfig + map: + fields: + - name: statsGatherer + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorNetClassConfig + map: + fields: + - name: collect + type: + namedType: 
com.github.openshift.api.config.v1alpha1.NodeExporterCollectorNetClassCollectConfig + default: {} + - name: collectionPolicy + type: + scalar: string + unions: + - discriminator: collectionPolicy + fields: + - fieldName: collect + discriminatorValue: Collect +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorNetDevConfig + map: + fields: + - name: collectionPolicy + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorProcessesConfig + map: + fields: + - name: collectionPolicy + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorSystemdCollectConfig + map: + fields: + - name: units + type: + list: + elementType: + scalar: string + elementRelationship: associative +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorSystemdConfig + map: + fields: + - name: collect + type: + namedType: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorSystemdCollectConfig + default: {} + - name: collectionPolicy + type: + scalar: string + unions: + - discriminator: collectionPolicy + fields: + - fieldName: collect + discriminatorValue: Collect +- name: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorTcpStatConfig + map: + fields: + - name: collectionPolicy + type: + scalar: string +- name: com.github.openshift.api.config.v1alpha1.NodeExporterConfig + map: + fields: + - name: collectors + type: + namedType: com.github.openshift.api.config.v1alpha1.NodeExporterCollectorConfig + default: {} + - name: ignoredNetworkDevices + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: maxProcs + type: + scalar: numeric + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: resources + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1alpha1.ContainerResource + elementRelationship: associative + keys: + - name + - name: tolerations + type: + list: + elementType: + 
namedType: Toleration.v1.core.api.k8s.io + elementRelationship: atomic - name: com.github.openshift.api.config.v1alpha1.OAuth2 map: fields: @@ -5760,6 +6041,68 @@ var schemaYAML = typed.YAMLObject(`types: - name: serverName type: scalar: string +- name: com.github.openshift.api.config.v1alpha1.TelemeterClientConfig + map: + fields: + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: resources + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1alpha1.ContainerResource + elementRelationship: associative + keys: + - name + - name: tolerations + type: + list: + elementType: + namedType: Toleration.v1.core.api.k8s.io + elementRelationship: atomic + - name: topologySpreadConstraints + type: + list: + elementType: + namedType: TopologySpreadConstraint.v1.core.api.k8s.io + elementRelationship: associative + keys: + - topologyKey + - whenUnsatisfiable +- name: com.github.openshift.api.config.v1alpha1.ThanosQuerierConfig + map: + fields: + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: resources + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1alpha1.ContainerResource + elementRelationship: associative + keys: + - name + - name: tolerations + type: + list: + elementType: + namedType: Toleration.v1.core.api.k8s.io + elementRelationship: atomic + - name: topologySpreadConstraints + type: + list: + elementType: + namedType: TopologySpreadConstraint.v1.core.api.k8s.io + elementRelationship: associative + keys: + - topologyKey + - whenUnsatisfiable - name: com.github.openshift.api.config.v1alpha1.UppercaseActionConfig map: fields: diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go index c01072fe57..bbec377c36 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go +++ 
b/vendor/github.com/openshift/client-go/config/applyconfigurations/utils.go @@ -50,8 +50,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.AWSDNSSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("AWSIngressSpec"): return &configv1.AWSIngressSpecApplyConfiguration{} - case v1.SchemeGroupVersion.WithKind("AWSKMSConfig"): - return &configv1.AWSKMSConfigApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("AWSPlatformSpec"): return &configv1.AWSPlatformSpecApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("AWSPlatformStatus"): @@ -452,6 +450,18 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1.UsernameClaimMappingApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("UsernamePrefix"): return &configv1.UsernamePrefixApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VaultAppRoleAuthentication"): + return &configv1.VaultAppRoleAuthenticationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VaultAuthentication"): + return &configv1.VaultAuthenticationApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VaultConfigMapReference"): + return &configv1.VaultConfigMapReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VaultKMSConfig"): + return &configv1.VaultKMSConfigApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VaultSecretReference"): + return &configv1.VaultSecretReferenceApplyConfiguration{} + case v1.SchemeGroupVersion.WithKind("VaultTLSConfig"): + return &configv1.VaultTLSConfigApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("VSphereFailureDomainHostGroup"): return &configv1.VSphereFailureDomainHostGroupApplyConfiguration{} case v1.SchemeGroupVersion.WithKind("VSphereFailureDomainRegionAffinity"): @@ -542,6 +552,36 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1alpha1.MetadataConfigCustomApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("MetricsServerConfig"): return 
&configv1alpha1.MetricsServerConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("MonitoringPluginConfig"): + return &configv1alpha1.MonitoringPluginConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorBuddyInfoConfig"): + return &configv1alpha1.NodeExporterCollectorBuddyInfoConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorConfig"): + return &configv1alpha1.NodeExporterCollectorConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorCpufreqConfig"): + return &configv1alpha1.NodeExporterCollectorCpufreqConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorEthtoolConfig"): + return &configv1alpha1.NodeExporterCollectorEthtoolConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorKSMDConfig"): + return &configv1alpha1.NodeExporterCollectorKSMDConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorMountStatsConfig"): + return &configv1alpha1.NodeExporterCollectorMountStatsConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorNetClassCollectConfig"): + return &configv1alpha1.NodeExporterCollectorNetClassCollectConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorNetClassConfig"): + return &configv1alpha1.NodeExporterCollectorNetClassConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorNetDevConfig"): + return &configv1alpha1.NodeExporterCollectorNetDevConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorProcessesConfig"): + return &configv1alpha1.NodeExporterCollectorProcessesConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorSystemdCollectConfig"): + return 
&configv1alpha1.NodeExporterCollectorSystemdCollectConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorSystemdConfig"): + return &configv1alpha1.NodeExporterCollectorSystemdConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterCollectorTcpStatConfig"): + return &configv1alpha1.NodeExporterCollectorTcpStatConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("NodeExporterConfig"): + return &configv1alpha1.NodeExporterConfigApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("OAuth2"): return &configv1alpha1.OAuth2ApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("OAuth2EndpointParam"): @@ -596,6 +636,10 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &configv1alpha1.Sigv4ApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("Storage"): return &configv1alpha1.StorageApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("TelemeterClientConfig"): + return &configv1alpha1.TelemeterClientConfigApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("ThanosQuerierConfig"): + return &configv1alpha1.ThanosQuerierConfigApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("TLSConfig"): return &configv1alpha1.TLSConfigApplyConfiguration{} case v1alpha1.SchemeGroupVersion.WithKind("UppercaseActionConfig"): diff --git a/vendor/github.com/openshift/client-go/machine/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/machine/applyconfigurations/internal/internal.go index 03faec6663..1dac6aa7dd 100644 --- a/vendor/github.com/openshift/client-go/machine/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/machine/applyconfigurations/internal/internal.go @@ -791,6 +791,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: fullyLabeledReplicas type: scalar: numeric + - name: labelSelector + type: + scalar: string - name: observedGeneration type: scalar: 
numeric diff --git a/vendor/github.com/openshift/client-go/machine/applyconfigurations/machine/v1beta1/machinesetstatus.go b/vendor/github.com/openshift/client-go/machine/applyconfigurations/machine/v1beta1/machinesetstatus.go index 3a2a76754f..b4020af133 100644 --- a/vendor/github.com/openshift/client-go/machine/applyconfigurations/machine/v1beta1/machinesetstatus.go +++ b/vendor/github.com/openshift/client-go/machine/applyconfigurations/machine/v1beta1/machinesetstatus.go @@ -21,6 +21,11 @@ type MachineSetStatusApplyConfiguration struct { AvailableReplicas *int32 `json:"availableReplicas,omitempty"` // observedGeneration reflects the generation of the most recently observed MachineSet. ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + // labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. + // It is exposed via the scale subresource as status.selector. + // When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. + // When present, it must not be empty and must not exceed 4096 characters. + LabelSelector *string `json:"labelSelector,omitempty"` // In the event that there is a terminal problem reconciling the // replicas, both ErrorReason and ErrorMessage will be set. ErrorReason // will be populated with a succinct value suitable for machine @@ -105,6 +110,14 @@ func (b *MachineSetStatusApplyConfiguration) WithObservedGeneration(value int64) return b } +// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LabelSelector field is set to the value of the last call. 
+func (b *MachineSetStatusApplyConfiguration) WithLabelSelector(value string) *MachineSetStatusApplyConfiguration { + b.LabelSelector = &value + return b +} + // WithErrorReason sets the ErrorReason field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ErrorReason field is set to the value of the last call. diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go index 3d7d6e73de..7603b05954 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go @@ -684,6 +684,16 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.operator.v1.BGPManagedConfig + map: + fields: + - name: asNumber + type: + scalar: numeric + default: 64512 + - name: bgpTopology + type: + scalar: string - name: com.github.openshift.api.operator.v1.BootImageSkewEnforcementConfig map: fields: @@ -2261,6 +2271,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: clientTimeout type: namedType: Duration.v1.meta.apis.pkg.apimachinery.k8s.io + - name: configurationManagement + type: + scalar: string - name: connectTimeout type: namedType: Duration.v1.meta.apis.pkg.apimachinery.k8s.io @@ -3132,6 +3145,15 @@ var schemaYAML = typed.YAMLObject(`types: - name: version type: scalar: string +- name: com.github.openshift.api.operator.v1.NoOverlayConfig + map: + fields: + - name: outboundSNAT + type: + scalar: string + - name: routing + type: + scalar: string - name: com.github.openshift.api.operator.v1.NodeDisruptionPolicyClusterStatus map: fields: @@ -3431,6 +3453,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: 
com.github.openshift.api.operator.v1.OVNKubernetesConfig map: fields: + - name: bgpManagedConfig + type: + namedType: com.github.openshift.api.operator.v1.BGPManagedConfig + default: {} - name: egressIPConfig type: namedType: com.github.openshift.api.operator.v1.EgressIPConfig @@ -3458,12 +3484,19 @@ var schemaYAML = typed.YAMLObject(`types: - name: mtu type: scalar: numeric + - name: noOverlayConfig + type: + namedType: com.github.openshift.api.operator.v1.NoOverlayConfig + default: {} - name: policyAuditConfig type: namedType: com.github.openshift.api.operator.v1.PolicyAuditConfig - name: routeAdvertisements type: scalar: string + - name: transport + type: + scalar: string - name: v4InternalSubnet type: scalar: string @@ -4383,6 +4416,9 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.operator.v1alpha1.ClusterAPIInstallerComponentImage default: {} + - name: name + type: + scalar: string - name: type type: scalar: string @@ -4412,6 +4448,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: contentID type: scalar: string + - name: manifestSubstitutions + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1alpha1.ClusterAPIInstallerRevisionManifestSubstitution + elementRelationship: associative + keys: + - key - name: name type: scalar: string @@ -4425,6 +4469,15 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string elementRelationship: atomic elementRelationship: atomic +- name: com.github.openshift.api.operator.v1alpha1.ClusterAPIInstallerRevisionManifestSubstitution + map: + fields: + - name: key + type: + scalar: string + - name: value + type: + scalar: string - name: com.github.openshift.api.operator.v1alpha1.ClusterAPISpec map: fields: @@ -4443,6 +4496,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: desiredRevision type: scalar: string + - name: observedRevisionGeneration + type: + scalar: numeric - name: revisions type: list: diff --git 
a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awscsidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awscsidriverconfigspec.go index 9274b84ad6..f527a9312a 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awscsidriverconfigspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awscsidriverconfigspec.go @@ -10,6 +10,12 @@ type AWSCSIDriverConfigSpecApplyConfiguration struct { // kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, // rather than the default KMS key used by AWS. // The value may be either the ARN or Alias ARN of a KMS key. + // + // The ARN must follow the format: arn::kms:::(key|alias)/, where: + // is the AWS partition (aws, aws-cn, aws-us-gov, aws-iso, aws-iso-b, aws-iso-e, aws-iso-f, or aws-eusc), + // is the AWS region, + // is a 12-digit numeric identifier for the AWS account, + // is the KMS key ID or alias name. KMSKeyARN *string `json:"kmsKeyARN,omitempty"` // efsVolumeMetrics sets the configuration for collecting metrics from EFS volumes used by the EFS CSI Driver. EFSVolumeMetrics *AWSEFSVolumeMetricsApplyConfiguration `json:"efsVolumeMetrics,omitempty"` diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/bgpmanagedconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/bgpmanagedconfig.go new file mode 100644 index 0000000000..071ac3fc23 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/bgpmanagedconfig.go @@ -0,0 +1,46 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// BGPManagedConfigApplyConfiguration represents a declarative configuration of the BGPManagedConfig type for use +// with apply. 
+// +// BGPManagedConfig contains configuration options for BGP when routing is "Managed". +type BGPManagedConfigApplyConfiguration struct { + // asNumber is the 2-byte or 4-byte Autonomous System Number (ASN) + // to be used in the generated FRR configuration. + // Valid values are 1 to 4294967295. + // When omitted, this defaults to 64512. + ASNumber *int64 `json:"asNumber,omitempty"` + // bgpTopology defines the BGP topology to be used. + // Allowed values are "FullMesh". + // When set to "FullMesh", every node peers directly with every other node via BGP. + // This field is required when BGPManagedConfig is specified. + BGPTopology *operatorv1.BGPTopology `json:"bgpTopology,omitempty"` +} + +// BGPManagedConfigApplyConfiguration constructs a declarative configuration of the BGPManagedConfig type for use with +// apply. +func BGPManagedConfig() *BGPManagedConfigApplyConfiguration { + return &BGPManagedConfigApplyConfiguration{} +} + +// WithASNumber sets the ASNumber field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ASNumber field is set to the value of the last call. +func (b *BGPManagedConfigApplyConfiguration) WithASNumber(value int64) *BGPManagedConfigApplyConfiguration { + b.ASNumber = &value + return b +} + +// WithBGPTopology sets the BGPTopology field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BGPTopology field is set to the value of the last call. 
+func (b *BGPManagedConfigApplyConfiguration) WithBGPTopology(value operatorv1.BGPTopology) *BGPManagedConfigApplyConfiguration { + b.BGPTopology = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go index 831ef5130e..d62c99282a 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ingresscontrollertuningoptions.go @@ -3,6 +3,7 @@ package v1 import ( + operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -209,6 +210,32 @@ type IngressControllerTuningOptionsApplyConfiguration struct { // be reloaded less frequently, and newly created routes will not be served until the // subsequent reload. ReloadInterval *metav1.Duration `json:"reloadInterval,omitempty"` + // configurationManagement specifies how OpenShift router should update + // the HAProxy configuration. The following values are valid for this + // field: + // + // * "ForkAndReload". + // * "Dynamic". + // + // Omitting this field means that the user has no opinion and the + // platform may choose a reasonable default. This default is subject to + // change over time. The current default is "ForkAndReload". + // + // "ForkAndReload" means that OpenShift router should rewrite the + // HAProxy configuration file and instruct HAProxy to fork and reload. + // This is OpenShift router's traditional approach. + // + // "Dynamic" means that OpenShift router may use HAProxy's control + // socket for some configuration updates and fall back to fork and + // reload for other configuration updates. This is a newer approach, + // which may be less mature than ForkAndReload. 
This setting can + // improve load-balancing fairness and metrics accuracy and reduce CPU + // and memory usage if HAProxy has frequent configuration updates for + // route and endpoints updates. + // + // Note: The "Dynamic" option is currently experimental and should not + // be enabled on production clusters. + ConfigurationManagement *operatorv1.IngressControllerConfigurationManagement `json:"configurationManagement,omitempty"` } // IngressControllerTuningOptionsApplyConfiguration constructs a declarative configuration of the IngressControllerTuningOptions type for use with @@ -328,3 +355,11 @@ func (b *IngressControllerTuningOptionsApplyConfiguration) WithReloadInterval(va b.ReloadInterval = &value return b } + +// WithConfigurationManagement sets the ConfigurationManagement field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ConfigurationManagement field is set to the value of the last call. +func (b *IngressControllerTuningOptionsApplyConfiguration) WithConfigurationManagement(value operatorv1.IngressControllerConfigurationManagement) *IngressControllerTuningOptionsApplyConfiguration { + b.ConfigurationManagement = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nooverlayconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nooverlayconfig.go new file mode 100644 index 0000000000..0feb3009ea --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/nooverlayconfig.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// NoOverlayConfigApplyConfiguration represents a declarative configuration of the NoOverlayConfig type for use +// with apply. 
+// +// NoOverlayConfig contains configuration options for networks operating in no-overlay mode. +type NoOverlayConfigApplyConfiguration struct { + // outboundSNAT defines the SNAT behavior for outbound traffic from pods. + // Allowed values are "Enabled" and "Disabled". + // When set to "Enabled", SNAT is performed on outbound traffic from pods. + // When set to "Disabled", SNAT is not performed and pod IPs are preserved in outbound traffic. + // This field is required when the network operates in no-overlay mode. + // This field can be set to any value at installation time and can be changed afterwards. + OutboundSNAT *operatorv1.SNATOption `json:"outboundSNAT,omitempty"` + // routing specifies whether the pod network routing is managed by OVN-Kubernetes or users. + // Allowed values are "Managed" and "Unmanaged". + // When set to "Managed", OVN-Kubernetes manages the pod network routing configuration through BGP. + // When set to "Unmanaged", users are responsible for configuring the pod network routing. + // This field is required when the network operates in no-overlay mode. + // This field is immutable once set. + Routing *operatorv1.RoutingOption `json:"routing,omitempty"` +} + +// NoOverlayConfigApplyConfiguration constructs a declarative configuration of the NoOverlayConfig type for use with +// apply. +func NoOverlayConfig() *NoOverlayConfigApplyConfiguration { + return &NoOverlayConfigApplyConfiguration{} +} + +// WithOutboundSNAT sets the OutboundSNAT field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OutboundSNAT field is set to the value of the last call. 
+func (b *NoOverlayConfigApplyConfiguration) WithOutboundSNAT(value operatorv1.SNATOption) *NoOverlayConfigApplyConfiguration { + b.OutboundSNAT = &value + return b +} + +// WithRouting sets the Routing field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Routing field is set to the value of the last call. +func (b *NoOverlayConfigApplyConfiguration) WithRouting(value operatorv1.RoutingOption) *NoOverlayConfigApplyConfiguration { + b.Routing = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ovnkubernetesconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ovnkubernetesconfig.go index 6a2d864421..ead45f83b5 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ovnkubernetesconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ovnkubernetesconfig.go @@ -62,6 +62,27 @@ type OVNKubernetesConfigApplyConfiguration struct { // reasonable defaults. These defaults are subject to change over time. The // current default is "Disabled". RouteAdvertisements *operatorv1.RouteAdvertisementsEnablement `json:"routeAdvertisements,omitempty"` + // transport sets the transport mode for pods on the default network. + // Allowed values are "NoOverlay" and "Geneve". + // "NoOverlay" avoids tunnel encapsulation, routing pod traffic directly between nodes. + // "Geneve" encapsulates pod traffic using Geneve tunnels between nodes. + // When omitted, this means the user has no opinion and the platform chooses + // a reasonable default which is subject to change over time. + // The current default is "Geneve". + // "NoOverlay" can only be set at installation time and cannot be changed afterwards. + // "Geneve" may be set explicitly at any time to lock in the current default. 
+ Transport *operatorv1.TransportOption `json:"transport,omitempty"` + // noOverlayConfig contains configuration for no-overlay mode. + // This configuration applies to the default network only. + // It is required when transport is "NoOverlay". + // When omitted, this means the user does not configure no-overlay mode options. + NoOverlayConfig *NoOverlayConfigApplyConfiguration `json:"noOverlayConfig,omitempty"` + // bgpManagedConfig configures the BGP properties for networks (default network or CUDNs) + // in no-overlay mode that specify routing="Managed" in their noOverlayConfig. + // It is required when noOverlayConfig.routing is set to "Managed". + // When omitted, this means the user does not configure BGP for managed routing. + // This field can be set at installation time or on day 2, and can be modified at any time. + BGPManagedConfig *BGPManagedConfigApplyConfiguration `json:"bgpManagedConfig,omitempty"` } // OVNKubernetesConfigApplyConfiguration constructs a declarative configuration of the OVNKubernetesConfig type for use with @@ -165,3 +186,27 @@ func (b *OVNKubernetesConfigApplyConfiguration) WithRouteAdvertisements(value op b.RouteAdvertisements = &value return b } + +// WithTransport sets the Transport field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Transport field is set to the value of the last call. +func (b *OVNKubernetesConfigApplyConfiguration) WithTransport(value operatorv1.TransportOption) *OVNKubernetesConfigApplyConfiguration { + b.Transport = &value + return b +} + +// WithNoOverlayConfig sets the NoOverlayConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoOverlayConfig field is set to the value of the last call. 
+func (b *OVNKubernetesConfigApplyConfiguration) WithNoOverlayConfig(value *NoOverlayConfigApplyConfiguration) *OVNKubernetesConfigApplyConfiguration { + b.NoOverlayConfig = value + return b +} + +// WithBGPManagedConfig sets the BGPManagedConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BGPManagedConfig field is set to the value of the last call. +func (b *OVNKubernetesConfigApplyConfiguration) WithBGPManagedConfig(value *BGPManagedConfigApplyConfiguration) *OVNKubernetesConfigApplyConfiguration { + b.BGPManagedConfig = value + return b +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 6d49944c0d..f16aa60994 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -986,7 +986,7 @@ github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo github.com/openshift-eng/openshift-tests-extension/pkg/junit github.com/openshift-eng/openshift-tests-extension/pkg/util/sets github.com/openshift-eng/openshift-tests-extension/pkg/version -# github.com/openshift/api v0.0.0-20260317165824-54a3998d81eb +# github.com/openshift/api v0.0.0-20260429211050-21ed4c20b122 ## explicit; go 1.25.0 github.com/openshift/api github.com/openshift/api/annotations @@ -1010,6 +1010,7 @@ github.com/openshift/api/config/v1alpha2 github.com/openshift/api/console github.com/openshift/api/console/v1 github.com/openshift/api/etcd +github.com/openshift/api/etcd/v1 github.com/openshift/api/etcd/v1alpha1 github.com/openshift/api/features github.com/openshift/api/helm @@ -1065,7 +1066,7 @@ github.com/openshift/api/template github.com/openshift/api/template/v1 github.com/openshift/api/user github.com/openshift/api/user/v1 -# github.com/openshift/client-go v0.0.0-20260317180604-743f664b82d1 +# github.com/openshift/client-go v0.0.0-20260429123927-c81f86abfa6a ## explicit; go 1.25.0 github.com/openshift/client-go/config/applyconfigurations 
github.com/openshift/client-go/config/applyconfigurations/config/v1 From 1ac54fe229637b31a455a34a58f086f93406d2e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radek=20Ma=C5=88=C3=A1k?= Date: Thu, 30 Apr 2026 15:49:17 +0200 Subject: [PATCH 2/2] machineset: populate status.labelSelector for scale subresource Set status.labelSelector from spec.selector on each reconcile so the CRD scale subresource (labelSelectorPath: .status.labelSelector) can expose the selector string to the HPA and other autoscalers via the autoscaling/v1 Scale status.selector field. Add tests verifying calculateStatus sets labelSelector correctly and that updateMachineSetStatus writes the field even when replica counts are unchanged. --- ...tor_03_machineset.CustomNoUpgrade.crd.yaml | 9 ++ ...pi-operator_03_machineset.Default.crd.yaml | 9 ++ ...03_machineset.DevPreviewNoUpgrade.crd.yaml | 9 ++ ...ne-api-operator_03_machineset.OKD.crd.yaml | 9 ++ ...3_machineset.TechPreviewNoUpgrade.crd.yaml | 9 ++ pkg/controller/machineset/status.go | 21 ++-- pkg/controller/machineset/status_test.go | 114 ++++++++++++++++++ 7 files changed, 172 insertions(+), 8 deletions(-) create mode 100644 pkg/controller/machineset/status_test.go diff --git a/install/0000_30_machine-api-operator_03_machineset.CustomNoUpgrade.crd.yaml b/install/0000_30_machine-api-operator_03_machineset.CustomNoUpgrade.crd.yaml index 7c68b7b189..06f2ea472a 100644 --- a/install/0000_30_machine-api-operator_03_machineset.CustomNoUpgrade.crd.yaml +++ b/install/0000_30_machine-api-operator_03_machineset.CustomNoUpgrade.crd.yaml @@ -660,6 +660,15 @@ spec: labels of the machine template of the MachineSet. format: int32 type: integer + labelSelector: + description: |- + labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. + It is exposed via the scale subresource as status.selector. + When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. 
+ When present, it must not be empty and must not exceed 4096 characters. + maxLength: 4096 + minLength: 1 + type: string observedGeneration: description: observedGeneration reflects the generation of the most recently observed MachineSet. diff --git a/install/0000_30_machine-api-operator_03_machineset.Default.crd.yaml b/install/0000_30_machine-api-operator_03_machineset.Default.crd.yaml index 624d477021..fb47ff7c99 100644 --- a/install/0000_30_machine-api-operator_03_machineset.Default.crd.yaml +++ b/install/0000_30_machine-api-operator_03_machineset.Default.crd.yaml @@ -615,6 +615,15 @@ spec: labels of the machine template of the MachineSet. format: int32 type: integer + labelSelector: + description: |- + labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. + It is exposed via the scale subresource as status.selector. + When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. + When present, it must not be empty and must not exceed 4096 characters. + maxLength: 4096 + minLength: 1 + type: string observedGeneration: description: observedGeneration reflects the generation of the most recently observed MachineSet. diff --git a/install/0000_30_machine-api-operator_03_machineset.DevPreviewNoUpgrade.crd.yaml b/install/0000_30_machine-api-operator_03_machineset.DevPreviewNoUpgrade.crd.yaml index 37fb42ca91..6b32059d0f 100644 --- a/install/0000_30_machine-api-operator_03_machineset.DevPreviewNoUpgrade.crd.yaml +++ b/install/0000_30_machine-api-operator_03_machineset.DevPreviewNoUpgrade.crd.yaml @@ -660,6 +660,15 @@ spec: labels of the machine template of the MachineSet. format: int32 type: integer + labelSelector: + description: |- + labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. + It is exposed via the scale subresource as status.selector. 
+ When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. + When present, it must not be empty and must not exceed 4096 characters. + maxLength: 4096 + minLength: 1 + type: string observedGeneration: description: observedGeneration reflects the generation of the most recently observed MachineSet. diff --git a/install/0000_30_machine-api-operator_03_machineset.OKD.crd.yaml b/install/0000_30_machine-api-operator_03_machineset.OKD.crd.yaml index 7cd0341da0..0cb72ea4b7 100644 --- a/install/0000_30_machine-api-operator_03_machineset.OKD.crd.yaml +++ b/install/0000_30_machine-api-operator_03_machineset.OKD.crd.yaml @@ -615,6 +615,15 @@ spec: labels of the machine template of the MachineSet. format: int32 type: integer + labelSelector: + description: |- + labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. + It is exposed via the scale subresource as status.selector. + When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. + When present, it must not be empty and must not exceed 4096 characters. + maxLength: 4096 + minLength: 1 + type: string observedGeneration: description: observedGeneration reflects the generation of the most recently observed MachineSet. diff --git a/install/0000_30_machine-api-operator_03_machineset.TechPreviewNoUpgrade.crd.yaml b/install/0000_30_machine-api-operator_03_machineset.TechPreviewNoUpgrade.crd.yaml index d93d2c15f8..71e0196782 100644 --- a/install/0000_30_machine-api-operator_03_machineset.TechPreviewNoUpgrade.crd.yaml +++ b/install/0000_30_machine-api-operator_03_machineset.TechPreviewNoUpgrade.crd.yaml @@ -660,6 +660,15 @@ spec: labels of the machine template of the MachineSet. format: int32 type: integer + labelSelector: + description: |- + labelSelector is a label selector, in string format, for Machines corresponding to the MachineSet. 
+ It is exposed via the scale subresource as status.selector. + When omitted, the MachineSet controller has not yet reconciled spec.selector into status.labelSelector. + When present, it must not be empty and must not exceed 4096 characters. + maxLength: 4096 + minLength: 1 + type: string observedGeneration: description: observedGeneration reflects the generation of the most recently observed MachineSet. diff --git a/pkg/controller/machineset/status.go b/pkg/controller/machineset/status.go index aafed7a464..0cf32e3598 100644 --- a/pkg/controller/machineset/status.go +++ b/pkg/controller/machineset/status.go @@ -19,7 +19,6 @@ package machineset import ( "context" "errors" - "fmt" "reflect" machinev1 "github.com/openshift/api/machine/v1beta1" @@ -67,6 +66,7 @@ func (c *ReconcileMachineSet) calculateStatus(ms *machinev1.MachineSet, filtered newStatus.FullyLabeledReplicas = int32(fullyLabeledReplicasCount) newStatus.ReadyReplicas = int32(readyReplicasCount) newStatus.AvailableReplicas = int32(availableReplicasCount) + newStatus.LabelSelector = metav1.FormatLabelSelector(&ms.Spec.Selector) return newStatus } @@ -80,6 +80,7 @@ func updateMachineSetStatus(c client.Client, ms *machinev1.MachineSet, newStatus ms.Status.FullyLabeledReplicas == newStatus.FullyLabeledReplicas && ms.Status.ReadyReplicas == newStatus.ReadyReplicas && ms.Status.AvailableReplicas == newStatus.AvailableReplicas && + ms.Status.LabelSelector == newStatus.LabelSelector && reflect.DeepEqual(ms.Status.Conditions, newStatus.Conditions) && ms.Generation == ms.Status.ObservedGeneration { return ms, nil @@ -97,13 +98,17 @@ func updateMachineSetStatus(c client.Client, ms *machinev1.MachineSet, newStatus if ms.Spec.Replicas != nil { replicas = *ms.Spec.Replicas } - klog.V(4).Infof("%s", fmt.Sprintf("Updating status for %v: %s/%s, ", ms.Kind, ms.Namespace, ms.Name)+ - fmt.Sprintf("replicas %d->%d (need %d), ", ms.Status.Replicas, newStatus.Replicas, replicas)+ - fmt.Sprintf("fullyLabeledReplicas %d->%d, ", 
ms.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas)+ - fmt.Sprintf("readyReplicas %d->%d, ", ms.Status.ReadyReplicas, newStatus.ReadyReplicas)+ - fmt.Sprintf("availableReplicas %d->%d, ", ms.Status.AvailableReplicas, newStatus.AvailableReplicas)+ - fmt.Sprintf("sequence No: %v->%v", ms.Status.ObservedGeneration, newStatus.ObservedGeneration)+ - fmt.Sprintf("conditions: %v->%v", ms.Status.Conditions, newStatus.Conditions)) + klog.V(4).Infof( + "Updating status for %v: %s/%s, replicas %d->%d (need %d), fullyLabeledReplicas %d->%d, readyReplicas %d->%d, availableReplicas %d->%d, labelSelector %q->%q, sequence No: %v->%v, conditions: %v->%v", + ms.Kind, ms.Namespace, ms.Name, + ms.Status.Replicas, newStatus.Replicas, replicas, + ms.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas, + ms.Status.ReadyReplicas, newStatus.ReadyReplicas, + ms.Status.AvailableReplicas, newStatus.AvailableReplicas, + ms.Status.LabelSelector, newStatus.LabelSelector, + ms.Status.ObservedGeneration, newStatus.ObservedGeneration, + ms.Status.Conditions, newStatus.Conditions, + ) ms.Status = newStatus patchErr = c.Status().Patch(context.Background(), ms, client.MergeFrom(machineSetCopy)) diff --git a/pkg/controller/machineset/status_test.go b/pkg/controller/machineset/status_test.go new file mode 100644 index 0000000000..be374c3a12 --- /dev/null +++ b/pkg/controller/machineset/status_test.go @@ -0,0 +1,114 @@ +/* +Copyright 2026 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package machineset
+
+import (
+	"context"
+	"testing"
+
+	. "github.com/onsi/gomega"
+	machinev1 "github.com/openshift/api/machine/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+// TestMachineSetStatusLabelSelectorMatchesScaleSubresource verifies that status.labelSelector is the
+// serialized label selector string expected by the CRD scale subresource (labelSelectorPath
+// -> .status.labelSelector), which the apiserver maps to autoscaling/v1 Scale status.selector.
+func TestMachineSetStatusLabelSelectorMatchesScaleSubresource(t *testing.T) {
+	tests := []struct {
+		name string
+		spec metav1.LabelSelector
+	}{
+		{
+			name: "matchLabels",
+			spec: metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"machine.openshift.io/cluster-api-cluster":    "cluster-id",
+					"machine.openshift.io/cluster-api-machineset": "workers",
+				},
+			},
+		},
+		{
+			name: "matchExpressions",
+			spec: metav1.LabelSelector{
+				MatchExpressions: []metav1.LabelSelectorRequirement{
+					{Key: "role", Operator: metav1.LabelSelectorOpIn, Values: []string{"worker", "infra"}},
+				},
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			ms := &machinev1.MachineSet{
+				Spec: machinev1.MachineSetSpec{
+					Selector: tc.spec,
+				},
+			}
+
+			got := (&ReconcileMachineSet{}).calculateStatus(ms, nil).LabelSelector
+			want := metav1.FormatLabelSelector(&ms.Spec.Selector)
+			g.Expect(got).To(Equal(want), "status.labelSelector must match Scale status.selector for HPA")
+		})
+	}
+}
+
+// TestUpdateMachineSetStatusUpdatesLabelSelectorWithoutReplicaChanges verifies that
+// updateMachineSetStatus patches status.labelSelector even when replica counts,
+// conditions, and observedGeneration are all unchanged.
+func TestUpdateMachineSetStatusUpdatesLabelSelectorWithoutReplicaChanges(t *testing.T) {
+	g := NewWithT(t)
+
+	ms := &machinev1.MachineSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "machineset-test",
+			Namespace:  "openshift-machine-api",
+			Generation: 1,
+		},
+		Status: machinev1.MachineSetStatus{
+			Replicas:             0,
+			FullyLabeledReplicas: 0,
+			ReadyReplicas:        0,
+			AvailableReplicas:    0,
+			ObservedGeneration:   1,
+		},
+	}
+
+	// Build a hermetic scheme instead of relying on clientgoscheme having the
+	// machine.openshift.io/v1beta1 types registered by some other package's init.
+	sch := runtime.NewScheme()
+	g.Expect(machinev1.Install(sch)).To(Succeed(), "failed to register machine.openshift.io/v1beta1 types")
+	cl := fake.NewClientBuilder().WithScheme(sch).WithRuntimeObjects(ms.DeepCopy()).
+		WithStatusSubresource(&machinev1.MachineSet{}).Build()
+
+	current := &machinev1.MachineSet{}
+	key := client.ObjectKeyFromObject(ms)
+	g.Expect(cl.Get(context.Background(), key, current)).To(Succeed(), "failed to fetch machineset")
+
+	newStatus := current.Status
+	newStatus.LabelSelector = "machine.openshift.io/cluster-api-cluster=test-cluster"
+
+	updated, err := updateMachineSetStatus(cl, current, newStatus)
+	g.Expect(err).NotTo(HaveOccurred(), "failed to update machineset status")
+	g.Expect(updated.Status.LabelSelector).To(Equal(newStatus.LabelSelector))
+
+	stored := &machinev1.MachineSet{}
+	g.Expect(cl.Get(context.Background(), key, stored)).To(Succeed(), "failed to refetch machineset")
+	g.Expect(stored.Status.LabelSelector).To(Equal(newStatus.LabelSelector))