diff --git a/.github/workflows/ci-image-scanning-on-schedule.yml b/.github/workflows/ci-image-scanning-on-schedule.yml new file mode 100644 index 000000000000..5261657c058e --- /dev/null +++ b/.github/workflows/ci-image-scanning-on-schedule.yml @@ -0,0 +1,69 @@ +name: image-scanning-on-schedule +on: + schedule: + # Run this workflow "At 00:00 UTC on Sunday" + - cron: '0 0 * * 0' +permissions: + contents: read +jobs: + use-trivy-to-scan-image: + permissions: + security-events: write # for github/codeql-action/upload-sarif to upload SARIF results + name: image-scanning + if: ${{ github.repository == 'karmada-io/karmada' }} + runs-on: ubuntu-22.04 + strategy: + fail-fast: false + matrix: + target: + - karmada-controller-manager + - karmada-scheduler + - karmada-descheduler + - karmada-webhook + - karmada-agent + - karmada-scheduler-estimator + - karmada-interpreter-webhook-example + - karmada-aggregated-apiserver + - karmada-search + - karmada-operator + - karmada-metrics-adapter + karmada-version: [ release-1.11, release-1.10, release-1.9 ] + steps: + - name: checkout code + uses: actions/checkout@v4 + with: + ref: ${{ matrix.karmada-version }} + - name: install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - id: gen_git_info + run: | + echo "ref=$(git rev-parse --symbolic-full-name HEAD)" >> "$GITHUB_OUTPUT" + echo "sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT" + - name: Build images from Dockerfile + run: | + export VERSION=${{ matrix.karmada-version }} + export REGISTRY="docker.io/karmada" + make image-${{ matrix.target }} + - name: Run Trivy vulnerability scanner + uses: aquasecurity/trivy-action@0.24.0 + with: + image-ref: 'docker.io/karmada/${{ matrix.target }}:${{ matrix.karmada-version }}' + format: 'sarif' + ignore-unfixed: true + vuln-type: 'os,library' + output: '${{ matrix.target }}:${{ matrix.karmada-version }}.trivy-results.sarif' + - name: display scan results + uses: aquasecurity/trivy-action@0.24.0 + with: + image-ref: 'docker.io/karmada/${{ matrix.target }}:${{ matrix.karmada-version }}' + format: 'table' + ignore-unfixed: true + vuln-type: 'os,library' + - name: Upload Trivy scan results to GitHub Security tab + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: '${{ matrix.target }}:${{ matrix.karmada-version }}.trivy-results.sarif' + ref: ${{steps.gen_git_info.outputs.ref}} + sha: ${{steps.gen_git_info.outputs.sha}} diff --git a/.github/workflows/ci-image-scanning.yaml b/.github/workflows/ci-image-scanning.yaml index 24e51df3214b..acfece19404f 100644 --- a/.github/workflows/ci-image-scanning.yaml +++ b/.github/workflows/ci-image-scanning.yaml @@ -1,62 +1,62 @@ -name: image-scanning -on: - push: - # Exclude branches created by Dependabot to avoid triggering current workflow - # for PRs initiated by Dependabot. 
- branches-ignore: - - 'dependabot/**' -permissions: - contents: read -jobs: - use-trivy-to-scan-image: - permissions: - security-events: write # for github/codeql-action/upload-sarif to upload SARIF results - name: image-scanning - if: ${{ github.repository == 'karmada-io/karmada' }} - runs-on: ubuntu-22.04 - strategy: - fail-fast: false - matrix: - target: - - karmada-controller-manager - - karmada-scheduler - - karmada-descheduler - - karmada-webhook - - karmada-agent - - karmada-scheduler-estimator - - karmada-interpreter-webhook-example - - karmada-aggregated-apiserver - - karmada-search - - karmada-operator - - karmada-metrics-adapter - steps: - - name: checkout code - uses: actions/checkout@v4 - - name: install Go - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - - name: Build an image from Dockerfile - run: | - export VERSION="latest" - export REGISTRY="docker.io/karmada" - make image-${{ matrix.target }} - - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@0.24.0 - with: - image-ref: 'docker.io/karmada/${{ matrix.target }}:latest' - format: 'sarif' - ignore-unfixed: true - vuln-type: 'os,library' - output: 'trivy-results.sarif' - - name: display scan results - uses: aquasecurity/trivy-action@0.24.0 - with: - image-ref: 'docker.io/karmada/${{ matrix.target }}:latest' - format: 'table' - ignore-unfixed: true - vuln-type: 'os,library' - - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v3 - with: - sarif_file: 'trivy-results.sarif' +name: image-scanning +on: + push: + # Exclude branches created by Dependabot to avoid triggering current workflow + # for PRs initiated by Dependabot. + branches-ignore: + - 'dependabot/**' +permissions: + contents: read +jobs: + use-trivy-to-scan-image: + permissions: + security-events: write # for github/codeql-action/upload-sarif to upload SARIF results + name: image-scanning + if: ${{ github.repository == 'karmada-io/karmada' }} + runs-on: ubuntu-22.04 + strategy: + fail-fast: false + matrix: + target: + - karmada-controller-manager + - karmada-scheduler + - karmada-descheduler + - karmada-webhook + - karmada-agent + - karmada-scheduler-estimator + - karmada-interpreter-webhook-example + - karmada-aggregated-apiserver + - karmada-search + - karmada-operator + - karmada-metrics-adapter + steps: + - name: checkout code + uses: actions/checkout@v4 + - name: install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: Build an image from Dockerfile + run: | + export VERSION="latest" + export REGISTRY="docker.io/karmada" + make image-${{ matrix.target }} + - name: Run Trivy vulnerability scanner + uses: aquasecurity/trivy-action@0.24.0 + with: + image-ref: 'docker.io/karmada/${{ matrix.target }}:latest' + format: 'sarif' + ignore-unfixed: true + vuln-type: 'os,library' + output: 'trivy-results.sarif' + - name: display scan results + uses: aquasecurity/trivy-action@0.24.0 + with: + image-ref: 'docker.io/karmada/${{ matrix.target }}:latest' + format: 'table' + ignore-unfixed: true + vuln-type: 'os,library' + - name: Upload Trivy scan results to GitHub Security tab + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: 'trivy-results.sarif' diff --git a/.github/workflows/dockerhub-latest-image.yml b/.github/workflows/dockerhub-latest-image.yml index 1190b7276514..bdc1de493a87 100644 --- a/.github/workflows/dockerhub-latest-image.yml +++ b/.github/workflows/dockerhub-latest-image.yml @@ -42,7 +42,7 @@ jobs: with: go-version-file: go.mod - 
name: Install Cosign - uses: sigstore/cosign-installer@v3.6.0 + uses: sigstore/cosign-installer@v3.7.0 with: cosign-release: 'v2.2.3' - name: install QEMU diff --git a/.github/workflows/dockerhub-released-image.yml b/.github/workflows/dockerhub-released-image.yml index 2b9076b4f917..b1ff45d78dfc 100644 --- a/.github/workflows/dockerhub-released-image.yml +++ b/.github/workflows/dockerhub-released-image.yml @@ -38,7 +38,7 @@ jobs: with: go-version-file: go.mod - name: Install Cosign - uses: sigstore/cosign-installer@v3.6.0 + uses: sigstore/cosign-installer@v3.7.0 with: cosign-release: 'v2.2.3' - name: install QEMU diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 39bdc00820c4..0534e67a4663 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -18691,6 +18691,36 @@ } } }, + "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.FieldOverrider": { + "description": "FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. This allows changing a single field within the resource with multiple operations. It is designed to handle structured field values such as those found in ConfigMaps or Secrets. The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously.", + "type": "object", + "required": [ + "fieldPath" + ], + "properties": { + "fieldPath": { + "description": "FieldPath specifies the initial location in the instance document where the operation should take place. The path uses RFC 6901 for navigating into nested structures. For example, the path \"/data/db-config.yaml\" specifies the configuration data key named \"db-config.yaml\" in a ConfigMap: \"/data/db-config.yaml\".", + "type": "string", + "default": "" + }, + "json": { + "description": "JSON represents the operations performed on the JSON document specified by the FieldPath.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.JSONPatchOperation" + } + }, + "yaml": { + "description": "YAML represents the operations performed on the YAML document specified by the FieldPath.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.YAMLPatchOperation" + } + } + } + }, "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.FieldSelector": { "description": "FieldSelector is a field filter.", "type": "object", @@ -18747,6 +18777,30 @@ } } }, + "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.JSONPatchOperation": { + "description": "JSONPatchOperation represents a single field modification operation for JSON format.", + "type": "object", + "required": [ + "subPath", + "operator" + ], + "properties": { + "operator": { + "description": "Operator indicates the operation on target field. Available operators are: \"add\", \"remove\", and \"replace\".", + "type": "string", + "default": "" + }, + "subPath": { + "description": "SubPath specifies the relative location within the initial FieldPath where the operation should take place. The path uses RFC 6901 for navigating into nested structures.", + "type": "string", + "default": "" + }, + "value": { + "description": "Value is the new value to set for the specified field if the operation is \"add\" or \"replace\". 
For \"remove\" operation, this field is ignored.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" + } + } + }, "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.LabelAnnotationOverrider": { "description": "LabelAnnotationOverrider represents the rules dedicated to handling workload labels/annotations", "type": "object", @@ -18871,7 +18925,7 @@ } }, "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.Overriders": { - "description": "Overriders offers various alternatives to represent the override rules.\n\nIf more than one alternative exists, they will be applied with following order: - ImageOverrider - CommandOverrider - ArgsOverrider - LabelsOverrider - AnnotationsOverrider - Plaintext", + "description": "Overriders offers various alternatives to represent the override rules.\n\nIf more than one alternative exists, they will be applied with following order: - ImageOverrider - CommandOverrider - ArgsOverrider - LabelsOverrider - AnnotationsOverrider - FieldOverrider - Plaintext", "type": "object", "properties": { "annotationsOverrider": { @@ -18898,6 +18952,14 @@ "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.CommandArgsOverrider" } }, + "fieldOverrider": { + "description": "FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. This allows changing a single field within the resource with multiple operations. It is designed to handle structured field values such as those found in ConfigMaps or Secrets. The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.FieldOverrider" + } + }, "imageOverrider": { "description": "ImageOverrider represents the rules dedicated to handling image overrides.", "type": "array", @@ -19098,6 +19160,10 @@ "Never" ] }, + "preserveResourcesOnDeletion": { + "description": "PreserveResourcesOnDeletion controls whether resources should be preserved on the member clusters when the resource template is deleted. If set to true, resources will be preserved on the member clusters. Default is false, which means resources will be deleted along with the resource template.\n\nThis setting is particularly useful during workload migration scenarios to ensure that rollback can occur quickly without affecting the workloads running on the member clusters.\n\nAdditionally, this setting applies uniformly across all member clusters and will not selectively control preservation on only some clusters.\n\nNote: This setting does not apply to the deletion of the policy itself. When the policy is deleted, the resource templates and their corresponding propagated resources in member clusters will remain unchanged unless explicitly deleted.", + "type": "boolean" + }, "priority": { "description": "Priority indicates the importance of a policy(PropagationPolicy or ClusterPropagationPolicy). A policy will be applied for the matched resource templates if there is no other policies with higher priority at the point of the resource template be processed. Once a resource template has been claimed by a policy, by default it will not be preempted by following policies even with a higher priority. 
See Preemption for more details.\n\nIn case of two policies have the same priority, the one with a more precise matching rules in ResourceSelectors wins: - matching by name(resourceSelector.name) has higher priority than\n by selector(resourceSelector.labelSelector)\n- matching by selector(resourceSelector.labelSelector) has higher priority\n than by APIVersion(resourceSelector.apiVersion) and Kind(resourceSelector.kind).\nIf there is still no winner at this point, the one with the lower alphabetic order wins, e.g. policy 'bar' has higher priority than 'foo'.\n\nThe higher the value, the higher the priority. Defaults to zero.", "type": "integer", @@ -19288,6 +19354,30 @@ } } }, + "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.YAMLPatchOperation": { + "description": "YAMLPatchOperation represents a single field modification operation for YAML format.", + "type": "object", + "required": [ + "subPath", + "operator" + ], + "properties": { + "operator": { + "description": "Operator indicates the operation on target field. Available operators are: \"add\", \"remove\", and \"replace\".", + "type": "string", + "default": "" + }, + "subPath": { + "description": "SubPath specifies the relative location within the initial FieldPath where the operation should take place. The path uses RFC 6901 for navigating into nested structures.", + "type": "string", + "default": "" + }, + "value": { + "description": "Value is the new value to set for the specified field if the operation is \"add\" or \"replace\". For \"remove\" operation, this field is ignored.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" + } + } + }, "com.github.karmada-io.karmada.pkg.apis.remedy.v1alpha1.ClusterAffinity": { "description": "ClusterAffinity represents the filter to select clusters.", "type": "object", @@ -19752,8 +19842,12 @@ "description": "WorkSpec defines the desired state of Work.", "type": "object", "properties": { + "preserveResourcesOnDeletion": { + "description": "PreserveResourcesOnDeletion controls whether resources should be preserved on the member cluster when the Work object is deleted. If set to true, resources will be preserved on the member cluster. Default is false, which means resources will be deleted along with the Work object.", + "type": "boolean" + }, "suspendDispatching": { - "description": "SuspendDispatching controls whether dispatching should be suspended, nil means not suspend. Note: true means stop propagating to all clusters.", + "description": "SuspendDispatching controls whether dispatching should be suspended, nil means not suspend. Note: true means stop propagating to the corresponding member cluster, and does not prevent status collection.", "type": "boolean" }, "workload": { @@ -20174,6 +20268,10 @@ "description": "Placement represents the rule for select clusters to propagate resources.", "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.Placement" }, + "preserveResourcesOnDeletion": { + "description": "PreserveResourcesOnDeletion controls whether resources should be preserved on the member clusters when the binding object is deleted. If set to true, resources will be preserved on the member clusters. Default is false, which means resources will be deleted along with the binding object. This setting applies to all Work objects created under this binding object.", + "type": "boolean" + }, "propagateDeps": { "description": "PropagateDeps tells if relevant resources should be propagated automatically. 
It is inherited from PropagationPolicy or ClusterPropagationPolicy. default false.", "type": "boolean" diff --git a/artifacts/deploy/karmada-descheduler.yaml b/artifacts/deploy/karmada-descheduler.yaml index df8feeb11a8c..696507be4b2c 100644 --- a/artifacts/deploy/karmada-descheduler.yaml +++ b/artifacts/deploy/karmada-descheduler.yaml @@ -26,7 +26,7 @@ spec: command: - /bin/karmada-descheduler - --kubeconfig=/etc/kubeconfig - - --metrics-bind-address=0.0.0.0:10358 + - --metrics-bind-address=0.0.0.0:8080 - --health-probe-bind-address=0.0.0.0:10358 - --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt - --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt @@ -42,7 +42,7 @@ spec: periodSeconds: 15 timeoutSeconds: 5 ports: - - containerPort: 10358 + - containerPort: 8080 name: metrics protocol: TCP volumeMounts: diff --git a/artifacts/deploy/karmada-scheduler.yaml b/artifacts/deploy/karmada-scheduler.yaml index 504de78f1fe6..78ea39224650 100644 --- a/artifacts/deploy/karmada-scheduler.yaml +++ b/artifacts/deploy/karmada-scheduler.yaml @@ -33,14 +33,13 @@ spec: periodSeconds: 15 timeoutSeconds: 5 ports: - - containerPort: 10351 + - containerPort: 8080 name: metrics protocol: TCP command: - /bin/karmada-scheduler - --kubeconfig=/etc/kubeconfig - - --bind-address=0.0.0.0 - - --metrics-bind-address=0.0.0.0:10351 + - --metrics-bind-address=0.0.0.0:8080 - --health-probe-bind-address=0.0.0.0:10351 - --enable-scheduler-estimator=true - --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt diff --git a/charts/index.yaml b/charts/index.yaml index 06968d2d38f4..d92b1578721b 100644 --- a/charts/index.yaml +++ b/charts/index.yaml @@ -2,7 +2,47 @@ apiVersion: v1 entries: karmada: - apiVersion: v2 - appVersion: latest + appVersion: v1.1.0 + created: "2024-09-21T12:09:38.421759709+08:00" + dependencies: + - name: common + repository: https://charts.bitnami.com/bitnami + version: 2.x.x + description: A Helm chart for karmada + digest: d4d9bbbda5ac03a4ba9349b17973b3df9047849398735dcb36cec4ec39140046 + kubeVersion: '>= 1.16.0-0' + maintainers: + - email: chaosi@zju.edu.cn + name: chaosi-zju + - email: amiralavi7@gmail.com + name: a7i + name: karmada + type: application + urls: + - https://github.com/karmada-io/karmada/releases/download/v1.11.0/karmada-chart-v1.11.0.tgz + version: v1.11.0 + - apiVersion: v2 + appVersion: v1.1.0 + created: "2024-09-21T12:04:45.631679376+08:00" + dependencies: + - name: common + repository: https://charts.bitnami.com/bitnami + version: 2.x.x + description: A Helm chart for karmada + digest: c381332dfebd6a4473cfcdb68abfca4916d415088cc90707b7336001e8cb20db + kubeVersion: '>= 1.16.0-0' + maintainers: + - email: chaosi@zju.edu.cn + name: chaosi-zju + - email: amiralavi7@gmail.com + name: a7i + name: karmada + type: application + urls: + - https://github.com/karmada-io/karmada/releases/download/v1.10.0/karmada-chart-v1.10.0.tgz + version: v1.10.0 + - apiVersion: v2 + appVersion: v1.1.0 created: "2024-02-29T16:55:17.965911-05:00" dependencies: - name: common @@ -242,6 +282,68 @@ entries: - https://github.com/karmada-io/karmada/releases/download/v1.2.0/karmada-chart-v1.2.0.tgz version: v1.2.0 karmada-operator: + - apiVersion: v2 + appVersion: v1.1.0 + created: "2024-09-21T15:01:05.712207268+08:00" + dependencies: + - name: common + repository: https://charts.bitnami.com/bitnami + version: 1.x.x + description: A Helm chart for karmada-operator + digest: d0aecd28e92ebfa5e9cb4836a38b878c3355f2f068f3645b7152a00625c168c6 + kubeVersion: '>= 1.16.0-0' + maintainers: + - email: 
wen.chen@daocloud.io + name: calvin0327 + - email: chaosi@zju.edu.cn + name: chaosi-zju + - email: amiralavi7@gmail.com + name: a7i + name: karmada-operator + type: application + urls: + - https://github.com/karmada-io/karmada/releases/download/v1.11.0/karmada-operator-chart-v1.11.0.tgz + version: v1.11.0 + - apiVersion: v2 + appVersion: v1.1.0 + created: "2024-09-21T14:59:09.729691529+08:00" + dependencies: + - name: common + repository: https://charts.bitnami.com/bitnami + version: 1.x.x + description: A Helm chart for karmada-operator + digest: a8794bcbfcf96d5ad2a7f7e976fac4d599437858f05d94acd7bf6c493ec6401e + kubeVersion: '>= 1.16.0-0' + maintainers: + - email: wen.chen@daocloud.io + name: calvin0327 + - email: chaosi@zju.edu.cn + name: chaosi-zju + - email: amiralavi7@gmail.com + name: a7i + name: karmada-operator + type: application + urls: + - https://github.com/karmada-io/karmada/releases/download/v1.10.0/karmada-operator-chart-v1.10.0.tgz + version: v1.10.0 + - apiVersion: v2 + appVersion: v1.1.0 + created: "2024-09-21T14:56:28.687039261+08:00" + dependencies: + - name: common + repository: https://charts.bitnami.com/bitnami + version: 1.x.x + description: A Helm chart for karmada-operator + digest: 609743e4c8bbe381fbc822e0957ccb1ba47ff5a84f33f86ee16f9f3c6d4c7eed + kubeVersion: '>= 1.16.0-0' + maintainers: + - email: wen.chen@daocloud.io + name: calvin0327 + name: karmada-operator + type: application + urls: + - https://github.com/karmada-io/karmada/releases/download/v1.9.0/karmada-operator-chart-v1.9.0.tgz + version: v1.9.0 - apiVersion: v2 appVersion: v1.1.0 created: "2023-11-30T11:47:32.390757-08:00" @@ -260,4 +362,4 @@ entries: urls: - https://github.com/karmada-io/karmada/releases/download/v1.8.0/karmada-operator-chart-v1.8.0.tgz version: v1.8.0 -generated: "2024-02-29T16:55:17.961702-05:00" +generated: "2024-09-21T15:01:05.710041947+08:00" diff --git a/charts/karmada-operator/Chart.yaml b/charts/karmada-operator/Chart.yaml index 2a055d3909fb..9c0f34596001 100644 --- a/charts/karmada-operator/Chart.yaml +++ b/charts/karmada-operator/Chart.yaml @@ -26,7 +26,7 @@ version: 0.0.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. -appVersion: latest +appVersion: v1.1.0 # This is karmada dependencies dependencies: @@ -38,3 +38,7 @@ dependencies: maintainers: - name: calvin0327 email: wen.chen@daocloud.io + - email: chaosi@zju.edu.cn + name: chaosi-zju + - email: amiralavi7@gmail.com + name: a7i diff --git a/charts/karmada/Chart.yaml b/charts/karmada/Chart.yaml index b9c4d8141057..8adc96f7f788 100644 --- a/charts/karmada/Chart.yaml +++ b/charts/karmada/Chart.yaml @@ -26,7 +26,7 @@ version: 0.0.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
-appVersion: latest +appVersion: v1.1.0 # This is karmada dependencies dependencies: @@ -36,9 +36,7 @@ dependencies: # This is karmada maintainers maintainers: - - name: jrkeen - email: jrkeen@hotmail.com - - name: pidb - email: jackson.cloudnative@gmail.com - - name: Poor12 - email: shentiecheng@huawei.com + - email: chaosi@zju.edu.cn + name: chaosi-zju + - email: amiralavi7@gmail.com + name: a7i diff --git a/charts/karmada/_crds/bases/policy/policy.karmada.io_clusteroverridepolicies.yaml b/charts/karmada/_crds/bases/policy/policy.karmada.io_clusteroverridepolicies.yaml index 360c08017a43..a74df1653c94 100644 --- a/charts/karmada/_crds/bases/policy/policy.karmada.io_clusteroverridepolicies.yaml +++ b/charts/karmada/_crds/bases/policy/policy.karmada.io_clusteroverridepolicies.yaml @@ -144,6 +144,92 @@ spec: - operator type: object type: array + fieldOverrider: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + items: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously. + properties: + fieldPath: + description: |- + FieldPath specifies the initial location in the instance document where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml" + specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml". + type: string + json: + description: JSON represents the operations performed + on the JSON document specified by the FieldPath. + items: + description: JSONPatchOperation represents a single + field modification operation for JSON format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + yaml: + description: YAML represents the operations performed + on the YAML document specified by the FieldPath. + items: + description: YAMLPatchOperation represents a single + field modification operation for YAML format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". 
+ enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + required: + - fieldPath + type: object + type: array imageOverrider: description: ImageOverrider represents the rules dedicated to handling image overrides. @@ -481,6 +567,92 @@ spec: - operator type: object type: array + fieldOverrider: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + items: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously. + properties: + fieldPath: + description: |- + FieldPath specifies the initial location in the instance document where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml" + specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml". + type: string + json: + description: JSON represents the operations performed on + the JSON document specified by the FieldPath. + items: + description: JSONPatchOperation represents a single field + modification operation for JSON format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + yaml: + description: YAML represents the operations performed on + the YAML document specified by the FieldPath. + items: + description: YAMLPatchOperation represents a single field + modification operation for YAML format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". 
+ enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + required: + - fieldPath + type: object + type: array imageOverrider: description: ImageOverrider represents the rules dedicated to handling image overrides. diff --git a/charts/karmada/_crds/bases/policy/policy.karmada.io_clusterpropagationpolicies.yaml b/charts/karmada/_crds/bases/policy/policy.karmada.io_clusterpropagationpolicies.yaml index c1ac7c57262e..1def90ac42f3 100644 --- a/charts/karmada/_crds/bases/policy/policy.karmada.io_clusterpropagationpolicies.yaml +++ b/charts/karmada/_crds/bases/policy/policy.karmada.io_clusterpropagationpolicies.yaml @@ -687,6 +687,27 @@ spec: - Always - Never type: string + preserveResourcesOnDeletion: + description: |- + PreserveResourcesOnDeletion controls whether resources should be preserved on the + member clusters when the resource template is deleted. + If set to true, resources will be preserved on the member clusters. + Default is false, which means resources will be deleted along with the resource template. + + + This setting is particularly useful during workload migration scenarios to ensure + that rollback can occur quickly without affecting the workloads running on the + member clusters. + + + Additionally, this setting applies uniformly across all member clusters and will not + selectively control preservation on only some clusters. + + + Note: This setting does not apply to the deletion of the policy itself. + When the policy is deleted, the resource templates and their corresponding + propagated resources in member clusters will remain unchanged unless explicitly deleted. + type: boolean priority: default: 0 description: |- diff --git a/charts/karmada/_crds/bases/policy/policy.karmada.io_overridepolicies.yaml b/charts/karmada/_crds/bases/policy/policy.karmada.io_overridepolicies.yaml index eb12e693d040..b2c4fbc02dea 100644 --- a/charts/karmada/_crds/bases/policy/policy.karmada.io_overridepolicies.yaml +++ b/charts/karmada/_crds/bases/policy/policy.karmada.io_overridepolicies.yaml @@ -144,6 +144,92 @@ spec: - operator type: object type: array + fieldOverrider: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + items: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. 
+ Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously. + properties: + fieldPath: + description: |- + FieldPath specifies the initial location in the instance document where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml" + specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml". + type: string + json: + description: JSON represents the operations performed + on the JSON document specified by the FieldPath. + items: + description: JSONPatchOperation represents a single + field modification operation for JSON format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + yaml: + description: YAML represents the operations performed + on the YAML document specified by the FieldPath. + items: + description: YAMLPatchOperation represents a single + field modification operation for YAML format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + required: + - fieldPath + type: object + type: array imageOverrider: description: ImageOverrider represents the rules dedicated to handling image overrides. @@ -481,6 +567,92 @@ spec: - operator type: object type: array + fieldOverrider: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + items: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. 
+ Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously. + properties: + fieldPath: + description: |- + FieldPath specifies the initial location in the instance document where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml" + specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml". + type: string + json: + description: JSON represents the operations performed on + the JSON document specified by the FieldPath. + items: + description: JSONPatchOperation represents a single field + modification operation for JSON format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + yaml: + description: YAML represents the operations performed on + the YAML document specified by the FieldPath. + items: + description: YAMLPatchOperation represents a single field + modification operation for YAML format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + required: + - fieldPath + type: object + type: array imageOverrider: description: ImageOverrider represents the rules dedicated to handling image overrides. diff --git a/charts/karmada/_crds/bases/policy/policy.karmada.io_propagationpolicies.yaml b/charts/karmada/_crds/bases/policy/policy.karmada.io_propagationpolicies.yaml index 1415e24325dd..43498e621541 100644 --- a/charts/karmada/_crds/bases/policy/policy.karmada.io_propagationpolicies.yaml +++ b/charts/karmada/_crds/bases/policy/policy.karmada.io_propagationpolicies.yaml @@ -684,6 +684,27 @@ spec: - Always - Never type: string + preserveResourcesOnDeletion: + description: |- + PreserveResourcesOnDeletion controls whether resources should be preserved on the + member clusters when the resource template is deleted. + If set to true, resources will be preserved on the member clusters. + Default is false, which means resources will be deleted along with the resource template. + + + This setting is particularly useful during workload migration scenarios to ensure + that rollback can occur quickly without affecting the workloads running on the + member clusters. 
+ + + Additionally, this setting applies uniformly across all member clusters and will not + selectively control preservation on only some clusters. + + + Note: This setting does not apply to the deletion of the policy itself. + When the policy is deleted, the resource templates and their corresponding + propagated resources in member clusters will remain unchanged unless explicitly deleted. + type: boolean priority: default: 0 description: |- diff --git a/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml b/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml index 7895411a95a2..82e3fab48523 100644 --- a/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml +++ b/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml @@ -930,6 +930,14 @@ spec: type: object type: array type: object + preserveResourcesOnDeletion: + description: |- + PreserveResourcesOnDeletion controls whether resources should be preserved on the + member clusters when the binding object is deleted. + If set to true, resources will be preserved on the member clusters. + Default is false, which means resources will be deleted along with the binding object. + This setting applies to all Work objects created under this binding object. + type: boolean propagateDeps: description: |- PropagateDeps tells if relevant resources should be propagated automatically. diff --git a/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml b/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml index f69531f230bd..78f9a5102cf2 100644 --- a/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml +++ b/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml @@ -930,6 +930,14 @@ spec: type: object type: array type: object + preserveResourcesOnDeletion: + description: |- + PreserveResourcesOnDeletion controls whether resources should be preserved on the + member clusters when the binding object is deleted. + If set to true, resources will be preserved on the member clusters. + Default is false, which means resources will be deleted along with the binding object. + This setting applies to all Work objects created under this binding object. + type: boolean propagateDeps: description: |- PropagateDeps tells if relevant resources should be propagated automatically. diff --git a/charts/karmada/_crds/bases/work/work.karmada.io_works.yaml b/charts/karmada/_crds/bases/work/work.karmada.io_works.yaml index 6f1fa665e438..0c2401db7397 100644 --- a/charts/karmada/_crds/bases/work/work.karmada.io_works.yaml +++ b/charts/karmada/_crds/bases/work/work.karmada.io_works.yaml @@ -54,11 +54,19 @@ spec: spec: description: Spec represents the desired behavior of Work. properties: + preserveResourcesOnDeletion: + description: |- + PreserveResourcesOnDeletion controls whether resources should be preserved on the + member cluster when the Work object is deleted. + If set to true, resources will be preserved on the member cluster. + Default is false, which means resources will be deleted along with the Work object. + type: boolean suspendDispatching: description: |- SuspendDispatching controls whether dispatching should be suspended, nil means not suspend. - Note: true means stop propagating to all clusters. + Note: true means stop propagating to the corresponding member cluster, and + does not prevent status collection. 
type: boolean workload: description: Workload represents the manifest workload to be deployed diff --git a/charts/karmada/templates/karmada-descheduler.yaml b/charts/karmada/templates/karmada-descheduler.yaml index 577e05e57b5e..21772976f5c3 100644 --- a/charts/karmada/templates/karmada-descheduler.yaml +++ b/charts/karmada/templates/karmada-descheduler.yaml @@ -50,7 +50,7 @@ spec: command: - /bin/karmada-descheduler - --kubeconfig=/etc/kubeconfig - - --metrics-bind-address=0.0.0.0:10358 + - --metrics-bind-address=0.0.0.0:8080 - --health-probe-bind-address=0.0.0.0:10358 - --leader-elect-resource-namespace={{ $systemNamespace }} - --scheduler-estimator-ca-file=/etc/karmada/pki/server-ca.crt @@ -67,7 +67,7 @@ spec: periodSeconds: 15 timeoutSeconds: 5 ports: - - containerPort: 10358 + - containerPort: 8080 name: metrics protocol: TCP volumeMounts: diff --git a/charts/karmada/templates/karmada-scheduler.yaml b/charts/karmada/templates/karmada-scheduler.yaml index acfc67511d1a..a690c5319600 100644 --- a/charts/karmada/templates/karmada-scheduler.yaml +++ b/charts/karmada/templates/karmada-scheduler.yaml @@ -50,8 +50,8 @@ spec: command: - /bin/karmada-scheduler - --kubeconfig=/etc/kubeconfig - - --bind-address=0.0.0.0 - - --secure-port=10351 + - --metrics-bind-address=0.0.0.0:8080 + - --health-probe-bind-address=0.0.0.0:10351 - --leader-elect-resource-namespace={{ $systemNamespace }} - --scheduler-estimator-ca-file=/etc/karmada/pki/server-ca.crt - --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt @@ -66,8 +66,9 @@ spec: periodSeconds: 15 timeoutSeconds: 5 ports: - - containerPort: 10351 - name: http + - containerPort: 8080 + name: metrics + protocol: TCP volumeMounts: - name: karmada-certs mountPath: /etc/karmada/pki diff --git a/cmd/descheduler/app/descheduler.go b/cmd/descheduler/app/descheduler.go index b93c4c4be239..4c17a4033bc3 100644 --- a/cmd/descheduler/app/descheduler.go +++ b/cmd/descheduler/app/descheduler.go @@ -85,11 +85,6 @@ func NewDeschedulerCommand(stopChan <-chan struct{}) *cobra.Command { if they are failed to be scheduled for a period of time. It relies on karmada-scheduler-estimator to get replica status.`, RunE: func(_ *cobra.Command, _ []string) error { - // complete options - if err := opts.Complete(); err != nil { - return err - } - // validate options if errs := opts.Validate(); len(errs) != 0 { return errs.ToAggregate() diff --git a/cmd/descheduler/app/options/options.go b/cmd/descheduler/app/options/options.go index a604f02d2e3c..084b4a269941 100644 --- a/cmd/descheduler/app/options/options.go +++ b/cmd/descheduler/app/options/options.go @@ -17,8 +17,6 @@ limitations under the License. package options import ( - "net" - "strconv" "time" "github.com/spf13/pflag" @@ -31,8 +29,6 @@ import ( ) const ( - defaultBindAddress = "0.0.0.0" - defaultPort = 10358 defaultEstimatorPort = 10352 defaultDeschedulingInterval = 2 * time.Minute defaultUnschedulableThreshold = 5 * time.Minute @@ -49,12 +45,6 @@ type Options struct { LeaderElection componentbaseconfig.LeaderElectionConfiguration KubeConfig string Master string - // BindAddress is the IP address on which to listen for the --secure-port port. - // Deprecated: To specify the TCP address for serving health probes, use HealthProbeBindAddress instead. To specify the TCP address for serving prometheus metrics, use MetricsBindAddress instead. This will be removed in release 1.12+. - BindAddress string - // SecurePort is the port that the server serves at. 
- // Deprecated: To specify the TCP address for serving health probes, use HealthProbeBindAddress instead. To specify the TCP address for serving prometheus metrics, use MetricsBindAddress instead. This will be removed in release 1.12+. - SecurePort int KubeAPIQPS float32 // KubeAPIBurst is the burst to allow while talking with karmada-apiserver. @@ -84,7 +74,7 @@ type Options struct { // MetricsBindAddress is the TCP address that the server should bind to // for serving prometheus metrics. // It can be set to "0" to disable the metrics serving. - // Defaults to ":10358". + // Defaults to ":8080". MetricsBindAddress string // HealthProbeBindAddress is the TCP address that the server should bind to // for serving health probes @@ -117,12 +107,6 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&o.LeaderElection.ResourceNamespace, "leader-elect-resource-namespace", util.NamespaceKarmadaSystem, "The namespace of resource object that is used for locking during leader election.") fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "Path to karmada control plane kubeconfig file.") fs.StringVar(&o.Master, "master", o.Master, "The address of the Kubernetes API server. Overrides any value in KubeConfig. Only required if out-of-cluster.") - fs.StringVar(&o.BindAddress, "bind-address", defaultBindAddress, "The IP address on which to listen for the --secure-port port.") - fs.IntVar(&o.SecurePort, "secure-port", defaultPort, "The secure port on which to serve HTTPS.") - // nolint: errcheck - fs.MarkDeprecated("bind-address", "This flag is deprecated and will be removed in release 1.12+. Use --health-probe-bind-address and --metrics-bind-address instead. Note: In release 1.12+, these two addresses cannot be the same.") - // nolint: errcheck - fs.MarkDeprecated("secure-port", "This flag is deprecated and will be removed in release 1.12+. Use --health-probe-bind-address and --metrics-bind-address instead. Note: In release 1.12+, these two addresses cannot be the same.") fs.Float32Var(&o.KubeAPIQPS, "kube-api-qps", 40.0, "QPS to use while talking with karmada-apiserver.") fs.IntVar(&o.KubeAPIBurst, "kube-api-burst", 60, "Burst to use while talking with karmada-apiserver.") fs.DurationVar(&o.SchedulerEstimatorTimeout.Duration, "scheduler-estimator-timeout", 3*time.Second, "Specifies the timeout period of calling the scheduler estimator service.") @@ -135,18 +119,7 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&o.SchedulerEstimatorServicePrefix, "scheduler-estimator-service-prefix", "karmada-scheduler-estimator", "The prefix of scheduler estimator service name") fs.DurationVar(&o.DeschedulingInterval.Duration, "descheduling-interval", defaultDeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.") fs.DurationVar(&o.UnschedulableThreshold.Duration, "unschedulable-threshold", defaultUnschedulableThreshold, "The period of pod unschedulable condition. This value is considered as a classification standard of unschedulable replicas.") - fs.StringVar(&o.MetricsBindAddress, "metrics-bind-address", "", "The TCP address that the server should bind to for serving prometheus metrics(e.g. 127.0.0.1:10358, :10358). It can be set to \"0\" to disable the metrics serving. Defaults to 0.0.0.0:10358.") - fs.StringVar(&o.HealthProbeBindAddress, "health-probe-bind-address", "", "The TCP address that the server should bind to for serving health probes(e.g. 
127.0.0.1:10358, :10358). It can be set to \"0\" to disable serving the health probe. Defaults to 0.0.0.0:10358.") + fs.StringVar(&o.MetricsBindAddress, "metrics-bind-address", ":8080", "The TCP address that the server should bind to for serving prometheus metrics(e.g. 127.0.0.1:8080, :8080). It can be set to \"0\" to disable the metrics serving. Defaults to 0.0.0.0:8080.") + fs.StringVar(&o.HealthProbeBindAddress, "health-probe-bind-address", ":10358", "The TCP address that the server should bind to for serving health probes(e.g. 127.0.0.1:10358, :10358). It can be set to \"0\" to disable serving the health probe. Defaults to 0.0.0.0:10358.") o.ProfileOpts.AddFlags(fs) } - -// Complete ensures that options are valid and marshals them if necessary. -func (o *Options) Complete() error { - if len(o.HealthProbeBindAddress) == 0 { - o.HealthProbeBindAddress = net.JoinHostPort(o.BindAddress, strconv.Itoa(o.SecurePort)) - } - if len(o.MetricsBindAddress) == 0 { - o.MetricsBindAddress = net.JoinHostPort(o.BindAddress, strconv.Itoa(o.SecurePort)) - } - return nil -} diff --git a/cmd/descheduler/app/options/validation_test.go b/cmd/descheduler/app/options/validation_test.go index 076bf44b10d6..dd0545cf8f66 100644 --- a/cmd/descheduler/app/options/validation_test.go +++ b/cmd/descheduler/app/options/validation_test.go @@ -59,8 +59,6 @@ func TestValidateKarmadaDescheduler(t *testing.T) { LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ LeaderElect: false, }, - BindAddress: "127.0.0.1", - SecurePort: 9000, KubeAPIQPS: 40, KubeAPIBurst: 30, }} diff --git a/cmd/scheduler/app/options/options.go b/cmd/scheduler/app/options/options.go index 5b12986d0062..36f67044763e 100644 --- a/cmd/scheduler/app/options/options.go +++ b/cmd/scheduler/app/options/options.go @@ -35,8 +35,6 @@ import ( ) const ( - defaultBindAddress = "0.0.0.0" - defaultPort = 10351 defaultEstimatorPort = 10352 ) @@ -51,17 +49,11 @@ type Options struct { LeaderElection componentbaseconfig.LeaderElectionConfiguration KubeConfig string Master string - // BindAddress is the IP address on which to listen for the --secure-port port. - // Deprecated: To specify the TCP address for serving health probes, use HealthProbeBindAddress instead. To specify the TCP address for serving prometheus metrics, use MetricsBindAddress instead. This will be removed in release 1.12+. - BindAddress string - // SecurePort is the port that the server serves at. - // Deprecated: To specify the TCP address for serving health probes, use HealthProbeBindAddress instead. To specify the TCP address for serving prometheus metrics, use MetricsBindAddress instead. This will be removed in release 1.12+. - SecurePort int // MetricsBindAddress is the TCP address that the controller should bind to // for serving prometheus metrics. // It can be set to "0" to disable the metrics serving. - // Defaults to ":10351". + // Defaults to ":8080". MetricsBindAddress string // HealthProbeBindAddress is the TCP address that the controller should bind to @@ -153,14 +145,8 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { "of a leadership. This is only applicable if leader election is enabled.") fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "Path to karmada control plane kubeconfig file.") fs.StringVar(&o.Master, "master", o.Master, "The address of the Kubernetes API server. Overrides any value in KubeConfig. 
Only required if out-of-cluster.") - fs.StringVar(&o.BindAddress, "bind-address", defaultBindAddress, "The IP address on which to listen for the --secure-port port.") - fs.IntVar(&o.SecurePort, "secure-port", defaultPort, "The secure port on which to serve HTTPS.") - // nolint: errcheck - fs.MarkDeprecated("bind-address", "This flag is deprecated and will be removed in release 1.12+. Use --health-probe-bind-address and --metrics-bind-address instead. Note: In release 1.12+, these two addresses cannot be the same.") - // nolint: errcheck - fs.MarkDeprecated("secure-port", "This flag is deprecated and will be removed in release 1.12+. Use --health-probe-bind-address and --metrics-bind-address instead. Note: In release 1.12+, these two addresses cannot be the same.") - fs.StringVar(&o.MetricsBindAddress, "metrics-bind-address", "", "The TCP address that the server should bind to for serving prometheus metrics(e.g. 127.0.0.1:9000, :10351). It can be set to \"0\" to disable the metrics serving. Defaults to 0.0.0.0:10351.") - fs.StringVar(&o.HealthProbeBindAddress, "health-probe-bind-address", "", "The TCP address that the server should bind to for serving health probes(e.g. 127.0.0.1:9000, :10351). It can be set to \"0\" to disable serving the health probe. Defaults to 0.0.0.0:10351.") + fs.StringVar(&o.MetricsBindAddress, "metrics-bind-address", ":8080", "The TCP address that the server should bind to for serving prometheus metrics(e.g. 127.0.0.1:8080, :8080). It can be set to \"0\" to disable the metrics serving. Defaults to 0.0.0.0:8080.") + fs.StringVar(&o.HealthProbeBindAddress, "health-probe-bind-address", ":10351", "The TCP address that the server should bind to for serving health probes(e.g. 127.0.0.1:10351, :10351). It can be set to \"0\" to disable serving the health probe. 
Defaults to 0.0.0.0:10351.") fs.Float32Var(&o.KubeAPIQPS, "kube-api-qps", 40.0, "QPS to use while talking with karmada-apiserver.") fs.IntVar(&o.KubeAPIBurst, "kube-api-burst", 60, "Burst to use while talking with karmada-apiserver.") fs.BoolVar(&o.EnableSchedulerEstimator, "enable-scheduler-estimator", false, "Enable calling cluster scheduler estimator for adjusting replicas.") diff --git a/cmd/scheduler/app/options/options_test.go b/cmd/scheduler/app/options/options_test.go index 2719db09a918..96487533f9ae 100644 --- a/cmd/scheduler/app/options/options_test.go +++ b/cmd/scheduler/app/options/options_test.go @@ -59,44 +59,6 @@ func TestAddFlags(t *testing.T) { } } -func TestOptionsComplete(t *testing.T) { - testCases := []struct { - name string - bindAddress string - securePort int - expectedMetrics string - expectedHealth string - }{ - { - name: "Default values", - bindAddress: defaultBindAddress, - securePort: defaultPort, - expectedMetrics: "0.0.0.0:10351", - expectedHealth: "0.0.0.0:10351", - }, - { - name: "Custom values", - bindAddress: "127.0.0.1", - securePort: 8080, - expectedMetrics: "127.0.0.1:8080", - expectedHealth: "127.0.0.1:8080", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - opts := &Options{ - BindAddress: tc.bindAddress, - SecurePort: tc.securePort, - } - err := opts.Complete() - assert.NoError(t, err) - assert.Equal(t, tc.expectedMetrics, opts.MetricsBindAddress) - assert.Equal(t, tc.expectedHealth, opts.HealthProbeBindAddress) - }) - } -} - func TestOptionsFlagParsing(t *testing.T) { opts := NewOptions() fs := pflag.NewFlagSet("test", pflag.ContinueOnError) diff --git a/cmd/scheduler/app/options/validation.go b/cmd/scheduler/app/options/validation.go index 793ac542eb99..8dc83188ec7a 100644 --- a/cmd/scheduler/app/options/validation.go +++ b/cmd/scheduler/app/options/validation.go @@ -17,23 +17,9 @@ limitations under the License. package options import ( - "net" - "strconv" - "k8s.io/apimachinery/pkg/util/validation/field" ) -// Complete ensures that options are valid and marshals them if necessary. -func (o *Options) Complete() error { - if len(o.HealthProbeBindAddress) == 0 { - o.HealthProbeBindAddress = net.JoinHostPort(o.BindAddress, strconv.Itoa(o.SecurePort)) - } - if len(o.MetricsBindAddress) == 0 { - o.MetricsBindAddress = net.JoinHostPort(o.BindAddress, strconv.Itoa(o.SecurePort)) - } - return nil -} - // Validate checks Options and return a slice of found errs. func (o *Options) Validate() field.ErrorList { errs := field.ErrorList{} diff --git a/cmd/scheduler/app/options/validation_test.go b/cmd/scheduler/app/options/validation_test.go index ee451959dc55..9cc5e1c63143 100644 --- a/cmd/scheduler/app/options/validation_test.go +++ b/cmd/scheduler/app/options/validation_test.go @@ -34,8 +34,6 @@ func New(modifyOptions ModifyOptions) Options { LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ LeaderElect: false, }, - BindAddress: "127.0.0.1", - SecurePort: 9000, KubeAPIQPS: 40, KubeAPIBurst: 30, EnableSchedulerEstimator: false, diff --git a/cmd/scheduler/app/scheduler.go b/cmd/scheduler/app/scheduler.go index 379c615f1874..475ee9482d5d 100644 --- a/cmd/scheduler/app/scheduler.go +++ b/cmd/scheduler/app/scheduler.go @@ -98,10 +98,6 @@ The scheduler determines which clusters are valid placements for each resource i constraints and available resources. 
The scheduler then ranks each valid cluster and binds the resource to the most suitable cluster.`, RunE: func(_ *cobra.Command, _ []string) error { - // complete options - if err := opts.Complete(); err != nil { - return err - } // validate options if errs := opts.Validate(); len(errs) != 0 { return errs.ToAggregate() diff --git a/docs/proposals/karmadactl/init-configuration-file.md b/docs/proposals/karmadactl/init-configuration-file.md new file mode 100644 index 000000000000..b06cb6acb6d6 --- /dev/null +++ b/docs/proposals/karmadactl/init-configuration-file.md @@ -0,0 +1,581 @@ +--- +title: Karmadactl Supports Configuration File Method for Production-Grade Installation and Deployment +authors: + - "@tiansuo114" +reviewers: + - "@liangyuanpeng" + - "@zhzhuang-zju" + - "@XiShanYongYe-Chang" + +approvers: + - "@liangyuanpeng" + - "@zhzhuang-zju" + - "@XiShanYongYe-Chang" + +creation-date: 2024-07-29 +--- + +# Karmadactl Supports Configuration File Method for Production-Grade Installation and Deployment + +## Summary + +The `karmadactl init` command is expected to support loading deployment parameters from a configuration file, simplifying the process of production-grade installation and reducing the complexity of managing numerous command-line flags. This enhancement allows users to streamline their multi-cluster management setups by utilizing predefined configuration files, similar to Kubernetes' `kubeadm`, making it easier to customize and maintain consistent deployments across environments. + +## Motivation + +As Karmada is widely used in multi-cluster management, the current `karmadactl init` command has accumulated dozens of command-line parameters. This not only increases the learning curve for users but also reduces the convenience of usage. With the ongoing development of the community, the number and complexity of these parameters are likely to continue growing, further impacting the user experience. + +### Goals + +- Provide the ability for `karmadactl` to construct `init` command parameters from a configuration file. + +### Non-Goals +- Add additional commands for karmadactl config, such as karmadactl config --init-config to add the ability to output default initialization config to the command + +## Proposal + +I believe that the flags in `karmada init` can mainly be divided into the following categories: + +- Certificate-related Options +- Etcd-related Options +- Karmada Control Plane-related Options +- Kubernetes-related Options +- Networking Options +- Image Options + +Therefore, I think the configuration file can be designed to focus on these parts, structuring the YAML accordingly, and incorporating the existing configuration items into them. Additionally, similar to the commonly used `Extra Arg` field in Kubernetes structures, an extra field can be added to store other general fields. When designing the fields, it is advisable to refer to the field names in the kubeadm configuration file as much as possible, to facilitate user familiarity and quick adoption. + +### User Stories (Optional) + +#### User Story 1: Simplified Cluster Deployment + +As a cloud platform operations engineer, Mr. Wang is responsible for managing multiple Karmada clusters for his company. Due to the large number of parameters that need to be set for each Karmada deployment, he finds the command line too complex and prone to errors. To improve deployment efficiency, Mr. 
Wang wants to use a predefined configuration file and streamline the cluster deployment process using the `karmadactl init --config` command. + +Now that Karmada supports loading deployment parameters from a configuration file, Mr. Wang only needs to create a configuration file and apply the parameters, reducing the complexity of manual command-line input and ensuring consistent deployments across different environments. This way, Mr. Wang can easily reuse the same configuration file, significantly simplifying his workflow. + +### Notes/Constraints/Caveats (Optional) + +### Risks and Mitigations + +## Design Details + + + +### CLI flags changes + +This proposal proposes new flags `config`in `karmadactl init` flag set. + +| name | shorthand | default | usage | +|--------|:---------:|---------|-----------------------------------| +| config | / | "" | Karmada init --config= | + +With these flag, we will: + +- When the user has a karmadactl init file, they can use the `--config` flag to specify that `karmadactl init` should read the configuration from the specified config file. Additionally, external cmd flags can still be used. In the design, if there are overlapping parts between the two, the configuration file configuration items will take higher priority. + +#### Example Performance: + +1. ##### Using a Default Configuration File for Karmada Initialization + + Assume that the user has prepared a default `karmada-init.yaml` configuration file that contains basic parameters for Karmada deployment, such as control plane components, etcd configuration, and image repositories. The user can start the Karmada initialization process with the following command: + + ``` + karmadactl init --config karmada-init.yaml + ``` + + yaml example + + ``` + apiVersion: config.karmada.io/v1alpha1 + kind: InitConfiguration + general: + namespace: "karmada-system" + kubeConfigPath: "/etc/karmada/kubeconfig" + kubeImageTag: "v1.21.0" + privateImageRegistry: "registry.k8s.io" + port: 32443 + karmadaControlPlane: + apiServer: + replicas: 3 + etcd: + local: + replicas: 3 + ``` + + If we configure without using a configuration file and rely solely on command-line flags, the command would look like this: + + ``` + karmadactl init \ + --namespace=karmada-system \ + --kubeconfig=/etc/karmada/kubeconfig \ + --kube-image-tag=v1.21.0 \ + --private-image-registry=registry.k8s.io \ + --port=32443 \ + --karmada-apiserver-replicas=3 \ + --etcd-replicas=3 + ``` + +2. ##### Specifying a Private Image Registry for Offline Deployment + + If a user is deploying Karmada in an offline environment, they may need to pull images from an internal private image registry. In this case, the user can specify the `privateImageRegistry` parameter in the configuration file and load it using the `--config` option. + + yaml example + + ``` + apiVersion: config.karmada.io/v1alpha1 + kind: InitConfiguration + general: + namespace: "karmada-system" + kubeConfigPath: "/etc/karmada/kubeconfig" + privateImageRegistry: "registry.company.com" + karmadaControlPlane: + apiServer: + replicas: 3 + etcd: + local: + replicas: 3 + ``` + + `karmadactl init` will pull all required Karmada images from the private image registry `registry.company.com`, ensuring that the deployment can proceed smoothly even in an offline or restricted network environment. 
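+
+To make the stated precedence concrete, below is a minimal, self-contained sketch. The stand-in types and field names (`initFlags`, `initConfig`, `mergeConfig`) are hypothetical illustrations rather than the real `CommandInitOption` or flag set; only the resolution order matches the behaviour described above, where configuration-file values win over overlapping command-line flags.
+
+```go
+package main
+
+import "fmt"
+
+// Hypothetical stand-ins holding just enough fields to show the precedence;
+// a real implementation would operate on the actual init option struct.
+type initFlags struct {
+	Namespace string
+	Port      int32
+}
+
+type initConfig struct {
+	Namespace string
+	Port      int32
+}
+
+// mergeConfig lets values from the configuration file override the
+// corresponding command-line flags whenever they are set in the file.
+func mergeConfig(flags initFlags, cfg initConfig) initFlags {
+	merged := flags
+	if cfg.Namespace != "" {
+		merged.Namespace = cfg.Namespace
+	}
+	if cfg.Port != 0 {
+		merged.Port = cfg.Port
+	}
+	return merged
+}
+
+func main() {
+	flags := initFlags{Namespace: "karmada-system", Port: 30443} // from CLI flags
+	cfg := initConfig{Port: 32443}                               // from the --config file
+	fmt.Printf("%+v\n", mergeConfig(flags, cfg))                 // Port from the file wins
+}
+```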
+ +### Implementation of Loading Configuration File + +Based on my classification of dozens of flags in the current `karmadactl init` command, I have designed the following data structure: + +```go +// KarmadaInitConfig defines the configuration for initializing Karmada +type KarmadaInitConfig struct { + metav1.TypeMeta `json:",inline" yaml:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` + + // Spec defines the desired state for initializing Karmada + // +optional + Spec KarmadaInitSpec `json:"spec,omitempty" yaml:"spec,omitempty"` +} + +// KarmadaInitSpec is the specification part of KarmadaInitConfig, containing all configurable options +type KarmadaInitSpec struct { + // Certificates configures the certificate information required by Karmada + // +optional + Certificates Certificates `json:"certificates,omitempty" yaml:"certificates,omitempty"` + + // Etcd configures the information of the Etcd cluster + // +optional + Etcd Etcd `json:"etcd,omitempty" yaml:"etcd,omitempty"` + + // ExternalEtcd configures the information of an external Etcd cluster + // +optional + ExternalEtcd *ExternalEtcd `json:"externalEtcd,omitempty" yaml:"externalEtcd,omitempty"` + + // HostCluster configures the information of the host cluster + // +optional + HostCluster HostCluster `json:"hostCluster,omitempty" yaml:"hostCluster,omitempty"` + + // Images configures image-related information + // +optional + Images Images `json:"images,omitempty" yaml:"images,omitempty"` + + // Components configures information about Karmada components + // +optional + Components KarmadaComponents `json:"components,omitempty" yaml:"components,omitempty"` + + // Networking configures network-related information + // +optional + Networking Networking `json:"networking,omitempty" yaml:"networking,omitempty"` + + // KarmadaDataPath configures the data directory for Karmada + // +optional + KarmadaDataPath string `json:"karmadaDataPath,omitempty" yaml:"karmadaDataPath,omitempty"` + + // KarmadaPkiPath configures the PKI directory for Karmada + // +optional + KarmadaPkiPath string `json:"karmadaPkiPath,omitempty" yaml:"karmadaPkiPath,omitempty"` + + // WaitComponentReadyTimeout configures the timeout (in seconds) for waiting for components to be ready + // +optional + WaitComponentReadyTimeout int `json:"waitComponentReadyTimeout,omitempty" yaml:"waitComponentReadyTimeout,omitempty"` +} + +// Certificates defines the configuration related to certificates +type Certificates struct { + // CACertFile is the path to the root CA certificate file + // +optional + CACertFile string `json:"caCertFile,omitempty" yaml:"caCertFile,omitempty"` + + // CAKeyFile is the path to the root CA key file + // +optional + CAKeyFile string `json:"caKeyFile,omitempty" yaml:"caKeyFile,omitempty"` + + // ExternalDNS is the list of external DNS names for the certificate + // +optional + ExternalDNS []string `json:"externalDNS,omitempty" yaml:"externalDNS,omitempty"` + + // ExternalIP is the list of external IPs for the certificate + // +optional + ExternalIP []string `json:"externalIP,omitempty" yaml:"externalIP,omitempty"` + + // ValidityPeriod is the validity period of the certificate + // +optional + ValidityPeriod metav1.Duration `json:"validityPeriod,omitempty" yaml:"validityPeriod,omitempty"` +} + +// Etcd defines the configuration of the Etcd cluster +type Etcd struct { + // Local indicates using a local Etcd cluster + // +optional + Local *LocalEtcd `json:"local,omitempty" yaml:"local,omitempty"` + + // External 
indicates using an external Etcd cluster + // +optional + External *ExternalEtcd `json:"external,omitempty" yaml:"external,omitempty"` +} + +// LocalEtcd defines the configuration of a local Etcd cluster +type LocalEtcd struct { + // CommonSettings contains common settings like image and resources + CommonSettings `json:",inline" yaml:",inline"` + + // DataPath is the data storage path for Etcd + // +optional + DataPath string `json:"dataPath,omitempty" yaml:"dataPath,omitempty"` + + // InitImage is the image for the Etcd init container + // +optional + InitImage Image `json:"initImage,omitempty" yaml:"initImage,omitempty"` + + // NodeSelectorLabels are the node selector labels for the Etcd pods + // +optional + NodeSelectorLabels map[string]string `json:"nodeSelectorLabels,omitempty" yaml:"nodeSelectorLabels,omitempty"` + + // PVCSize is the size of the PersistentVolumeClaim for Etcd + // +optional + PVCSize string `json:"pvcSize,omitempty" yaml:"pvcSize,omitempty"` + + // Replicas is the number of replicas in the Etcd cluster + // +optional + Replicas int32 `json:"replicas,omitempty" yaml:"replicas,omitempty"` + + // StorageMode is the storage mode for Etcd (e.g., emptyDir, hostPath, PVC) + // +optional + StorageMode string `json:"storageMode,omitempty" yaml:"storageMode,omitempty"` +} + +// ExternalEtcd defines the configuration of an external Etcd cluster +type ExternalEtcd struct { + // Endpoints are the server addresses of the external Etcd cluster + // +required + Endpoints []string `json:"endpoints" yaml:"endpoints"` + + // CAFile is the path to the CA certificate for the external Etcd cluster + // +optional + CAFile string `json:"caFile,omitempty" yaml:"caFile,omitempty"` + + // CertFile is the path to the client certificate for the external Etcd cluster + // +optional + CertFile string `json:"certFile,omitempty" yaml:"certFile,omitempty"` + + // KeyFile is the path to the client key for the external Etcd cluster + // +optional + KeyFile string `json:"keyFile,omitempty" yaml:"keyFile,omitempty"` + + // KeyPrefix is the key prefix used in the external Etcd cluster + // +optional + KeyPrefix string `json:"keyPrefix,omitempty" yaml:"keyPrefix,omitempty"` +} + +// HostCluster defines the configuration of the host cluster +type HostCluster struct { + // APIEndpoint is the API server address of the host cluster + // +optional + APIEndpoint string `json:"apiEndpoint,omitempty" yaml:"apiEndpoint,omitempty"` + + // Kubeconfig is the path to the kubeconfig file for the host cluster + // +optional + Kubeconfig string `json:"kubeconfig,omitempty" yaml:"kubeconfig,omitempty"` + + // Context is the context name in the kubeconfig for the host cluster + // +optional + Context string `json:"context,omitempty" yaml:"context,omitempty"` + + // Domain is the domain name of the host cluster + // +optional + Domain string `json:"domain,omitempty" yaml:"domain,omitempty"` + + // SecretRef refers to the credentials needed to access the host cluster + // +optional + SecretRef *LocalSecretReference `json:"secretRef,omitempty" yaml:"secretRef,omitempty"` +} + +// Images defines the configuration related to images +type Images struct { + // ImagePullPolicy is the pull policy for images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" yaml:"imagePullPolicy,omitempty"` + + // ImagePullSecrets are the secrets used for pulling images + // +optional + ImagePullSecrets []string `json:"imagePullSecrets,omitempty" yaml:"imagePullSecrets,omitempty"` + + // KubeImageMirrorCountry is 
the country code for the Kubernetes image mirror + // +optional + KubeImageMirrorCountry string `json:"kubeImageMirrorCountry,omitempty" yaml:"kubeImageMirrorCountry,omitempty"` + + // KubeImageRegistry is the registry for Kubernetes images + // +optional + KubeImageRegistry string `json:"kubeImageRegistry,omitempty" yaml:"kubeImageRegistry,omitempty"` + + // KubeImageTag is the tag for Kubernetes images + // +optional + KubeImageTag string `json:"kubeImageTag,omitempty" yaml:"kubeImageTag,omitempty"` + + // PrivateRegistry is the private image registry + // +optional + PrivateRegistry *ImageRegistry `json:"privateRegistry,omitempty" yaml:"privateRegistry,omitempty"` +} + +// KarmadaComponents defines the configuration for all Karmada components +type KarmadaComponents struct { + // Etcd is the configuration for the Etcd component + // +optional + Etcd *Etcd `json:"etcd,omitempty" yaml:"etcd,omitempty"` + + // KarmadaAPIServer is the configuration for the Karmada API Server + // +optional + KarmadaAPIServer *KarmadaAPIServer `json:"karmadaAPIServer,omitempty" yaml:"karmadaAPIServer,omitempty"` + + // KarmadaAggregatedAPIServer is the configuration for the Karmada Aggregated API Server + // +optional + KarmadaAggregatedAPIServer *KarmadaAggregatedAPIServer `json:"karmadaAggregatedAPIServer,omitempty" yaml:"karmadaAggregatedAPIServer,omitempty"` + + // KubeControllerManager is the configuration for the Kube Controller Manager + // +optional + KubeControllerManager *KubeControllerManager `json:"kubeControllerManager,omitempty" yaml:"kubeControllerManager,omitempty"` + + // KarmadaControllerManager is the configuration for the Karmada Controller Manager + // +optional + KarmadaControllerManager *KarmadaControllerManager `json:"karmadaControllerManager,omitempty" yaml:"karmadaControllerManager,omitempty"` + + // KarmadaScheduler is the configuration for the Karmada Scheduler + // +optional + KarmadaScheduler *KarmadaScheduler `json:"karmadaScheduler,omitempty" yaml:"karmadaScheduler,omitempty"` + + // KarmadaWebhook is the configuration for the Karmada Webhook + // +optional + KarmadaWebhook *KarmadaWebhook `json:"karmadaWebhook,omitempty" yaml:"karmadaWebhook,omitempty"` +} + +// Networking defines network-related configuration +type Networking struct { + // Namespace is the Kubernetes namespace where Karmada is deployed + // +optional + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + + // Port is the port number for the Karmada API Server + // +optional + Port int32 `json:"port,omitempty" yaml:"port,omitempty"` +} + +// CommonSettings defines common settings for components +type CommonSettings struct { + // Image specifies the image to use for the component + Image `json:",inline" yaml:",inline"` + + // Replicas is the number of replicas for the component + // +optional + Replicas *int32 `json:"replicas,omitempty" yaml:"replicas,omitempty"` + + // Resources defines resource requests and limits for the component + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" yaml:"resources,omitempty"` + + // NodeSelector defines node selection constraints + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty"` + + // Tolerations define pod tolerations + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty"` + + // Affinity defines pod affinity rules + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty" 
yaml:"affinity,omitempty"` +} + +// Image defines image information +type Image struct { + // ImageRepository is the repository for the image + // +optional + ImageRepository string `json:"imageRepository,omitempty" yaml:"imageRepository,omitempty"` + + // ImageTag is the tag for the image + // +optional + ImageTag string `json:"imageTag,omitempty" yaml:"imageTag,omitempty"` +} + +// KarmadaAPIServer defines the configuration for the Karmada API Server +type KarmadaAPIServer struct { + CommonSettings `json:",inline" yaml:",inline"` + + // AdvertiseAddress is the address advertised by the API server + // +optional + AdvertiseAddress string `json:"advertiseAddress,omitempty" yaml:"advertiseAddress,omitempty"` + + // ServiceType is the type of service for the API server (e.g., ClusterIP, NodePort) + // +optional + ServiceType corev1.ServiceType `json:"serviceType,omitempty" yaml:"serviceType,omitempty"` + + // ServiceAnnotations are annotations added to the API server service + // +optional + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty" yaml:"serviceAnnotations,omitempty"` +} + +// KarmadaAggregatedAPIServer defines the configuration for the Karmada Aggregated API Server +type KarmadaAggregatedAPIServer struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// KubeControllerManager defines the configuration for the Kube Controller Manager +type KubeControllerManager struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// KarmadaControllerManager defines the configuration for the Karmada Controller Manager +type KarmadaControllerManager struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// KarmadaScheduler defines the configuration for the Karmada Scheduler +type KarmadaScheduler struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// KarmadaWebhook defines the configuration for the Karmada Webhook +type KarmadaWebhook struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// LocalSecretReference is a reference to a secret within the same namespace +type LocalSecretReference struct { + // Name is the name of the referenced secret + Name string `json:"name,omitempty" yaml:"name,omitempty"` +} + +// ImageRegistry represents an image registry +type ImageRegistry struct { + // Registry is the hostname of the image registry + // +required + Registry string `json:"registry" yaml:"registry"` +} +``` + +After reading the logic, the information in the configuration file will be placed into the `InitConfiguration` structure, and then the fields will be filled into the `CommandInitOption` structure used to control the `karmadactl init` process. This way, the deployment of the Karmada control plane can proceed according to the original logic. + +For the design of the configuration file, I think it can be designed similarly to the config structure in `kubeadm init`. I will provide a YAML file example below. To provide users with a configuration file, I think we can prepare a default configuration file example and make it a sub-command in `karmadactl config`. When the feature design is complete and the official documentation is updated, an example can also be demonstrated in the official documentation. 
+ +Example configuration file: + +```yaml +apiVersion: v1alpha1 +kind: KarmadaInitConfig +metadata: + name: karmada-init +spec: + certificates: + caCertFile: "/etc/karmada/pki/ca.crt" + caKeyFile: "/etc/karmada/pki/ca.key" + externalDNS: + - "localhost" + - "example.com" + externalIP: + - "192.168.1.2" + - "172.16.1.2" + validityPeriod: "8760h0m0s" + etcd: + local: + image: + imageRepository: "karmada/etcd" + imageTag: "latest" + dataPath: "/var/lib/karmada-etcd" + initImage: + imageRepository: "alpine" + imageTag: "3.19.1" + nodeSelectorLabels: + karmada.io/etcd: "true" + pvcSize: "5Gi" + replicas: 3 + storageMode: "PVC" + hostCluster: + apiEndpoint: "https://kubernetes.example.com" + kubeconfig: "/root/.kube/config" + context: "karmada-host" + domain: "cluster.local" + images: + imagePullPolicy: "IfNotPresent" + imagePullSecrets: + - "PullSecret1" + - "PullSecret2" + kubeImageMirrorCountry: "cn" + kubeImageRegistry: "registry.cn-hangzhou.aliyuncs.com/google_containers" + kubeImageTag: "v1.29.6" + privateRegistry: + registry: "my.private.registry" + components: + karmadaAPIServer: + image: + imageRepository: "karmada/kube-apiserver" + imageTag: "v1.29.6" + replicas: 1 + advertiseAddress: "192.168.1.100" + serviceType: "NodePort" + karmadaAggregatedAPIServer: + image: + imageRepository: "karmada/karmada-aggregated-apiserver" + imageTag: "v0.0.0-master" + replicas: 1 + kubeControllerManager: + image: + imageRepository: "karmada/kube-controller-manager" + imageTag: "v1.29.6" + replicas: 1 + karmadaControllerManager: + image: + imageRepository: "karmada/karmada-controller-manager" + imageTag: "v0.0.0-master" + replicas: 1 + karmadaScheduler: + image: + imageRepository: "karmada/karmada-scheduler" + imageTag: "v0.0.0-master" + replicas: 1 + karmadaWebhook: + image: + imageRepository: "karmada/karmada-webhook" + imageTag: "v0.0.0-master" + replicas: 1 + networking: + namespace: "karmada-system" + port: 32443 + storage: + storageClassName: "fast-storage" + karmadaDataPath: "/etc/karmada" + karmadaPkiPath: "/etc/karmada/pki" + waitComponentReadyTimeout: 120 +``` + +## Alternatives + + + + \ No newline at end of file diff --git a/docs/proposals/migration-rollback-protection/README.md b/docs/proposals/migration-rollback-protection/README.md new file mode 100644 index 000000000000..3e0f4fe01c80 --- /dev/null +++ b/docs/proposals/migration-rollback-protection/README.md @@ -0,0 +1,324 @@ +--- +title: Migration Rollback Protection +authors: +- "@CharlesQQ" +reviewers: +- "@RainbowMango" +- "@XiShanYongYe-Chang" +- "@chaosi-zju" +- "@whitewindmills" +- "@grosser" +approvers: +- "@RainbowMango" + +creation-date: 2024-07-01 + +--- + +# Migration Rollback Protection + +## Summary + +Provide a deletion strategy for federated resources, allowing users to choose whether to synchronously delete the workloads in member clusters when deleting workloads at the federation level. + +This setting is particularly useful during workload migration scenarios to ensure that rollback can occur quickly without affecting the workloads running on the member clusters. + +## Motivation + +The current behavior of the Karmada system is that when a user deletes resources from the Karmada control plane, the distributed resources in the member clusters are also deleted synchronously. However, in certain scenarios, such as workload migration scenarios, users may wish to retain the workloads in the member clusters. 
+ +### Goals + +- Provide the capability to retain resources in member clusters when deleting control plane resources, while at the same time, clean up labels/annotations and other information attached to member cluster resources by the Karmada system. + +### Non-Goals + +- Define different resource deletion strategies for different member clusters. +- Provide the capability to retain resources in member clusters for Karmada federated resources, such as cronfederatedhpa, federatedhpa, federatedresourcequota, etc. +- Other deletion strategies, such as retaining work objects in the Karmada control plane. +- Cascading deletion control of resources in member clusters. + +## Proposal + +### User Stories (Optional) + +#### Story 1 + +As an administrator, I hope that during the process of migrating workloads to Karmada, if any unexpected situations arise, such as the cloud platform being unable to publish the application or the Pod encountering unexpected issues, it is necessary to use the rollback mechanism provided by Karmada to immediately revert to the state before the migration in order to quickly stop the loss. + +### Notes/Constraints/Caveats (Optional) + +- For resources that are not distributed through PropagationPolicy, such as namespace, it is not possible to specify a deletion policy. Unless the controller for automatic resource propagation is disabled, and users are required to propagate resources through PP (PropagationPolicy) / CPP (ClusterPropagationPolicy). +- In one policy vs multi resource scene, we can't execute delete policy just by per resource. + +### Risks and Mitigations + +## Design Details + +### Extend the fields of PropagationPolicy/ClusterPropagationPolicy + +By extending the `PropagationPolicy/ClusterPropagationPolicy` API, a new bool field `PreserveResourcesOnDeletion` is introduced. The field will be transparently transmitted to `ResourceBinding/ClusterResourceBinding` and the work object. Finally, the execution controller determines the deletion strategy based on the value of the work field. + +#### API changes + +PropagationPolicy/ClusterPropagationPolicy +```go +type PropagationSpec struct { + ... + + // PreserveResourcesOnDeletion controls whether resources should be preserved on the + // member clusters when the resource template is deleted. + // If set to true, resources will be preserved on the member clusters. + // Default is false, which means resources will be deleted along with the resource template. + // + // This setting is particularly useful during workload migration scenarios to ensure + // that rollback can occur quickly without affecting the workloads running on the + // member clusters. + // + // Additionally, this setting applies uniformly across all member clusters and will not + // selectively control preservation on only some clusters. + // + // Note: This setting does not apply to the deletion of the policy itself. + // When the policy is deleted, the resource templates and their corresponding + // propagated resources in member clusters will remain unchanged unless explicitly deleted. + // + // +optional + PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"` +} +``` + +ResourceBinding/ClusterResourceBinding +```go +type ResourceBindingSpec struct { + ... + + // PreserveResourcesOnDeletion controls whether resources should be preserved on the + // member clusters when the binding object is deleted. + // If set to true, resources will be preserved on the member clusters. 
+ // Default is false, which means resources will be deleted along with the binding object. + // This setting applies to all Work objects created under this binding object. + // +optional + PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"` +} +``` + +Work +```go +// WorkSpec defines the desired state of Work. +type WorkSpec struct { + ... + + // PreserveResourcesOnDeletion controls whether resources should be preserved on the + // member cluster when the Work object is deleted. + // If set to true, resources will be preserved on the member cluster. + // Default is false, which means resources will be deleted along with the Work object. + // +optional + PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"` +} +``` + +#### Controller logic changes + +The `detector` needs to pass the `PreserveResourcesOnDeletion` from PropagationPolicy/ClusterPropagationPolicy to ResourceBinding/ClusterResourceBinding. + +The `binding-controller` needs to pass the `PreserveResourcesOnDeletion` from ResourceBinding to Work. + +The `cluster-resource-binding-controller` needs to pass the `PreserveResourcesOnDeletion` from ClusterResourceBinding to Work. + +The `execution-controller` needs to perform resource deletion based on the `PreserveResourcesOnDeletion` field in Work. + +#### User usage example + +Set the cascade deletion policy to orphan: + +```yaml +apiVersion: policy.karmada.io/v1alpha1 +kind: PropagationPolicy +metadata: + name: nginx-propagation +spec: + resourceSelectors: + - apiVersion: apps/v1 + kind: Deployment + name: nginx + preserveResourcesOnDeletion: true +``` + +#### Q&A: + +1. The resource deletion policy of dependent resources and main resources does not force binding. + +Since dependent resources may be shared by multiple resource templates, in this case it is difficult to decide which deletion strategy should be used for the dependent resources; it is not forced to be bound to the main resource, and is left to the user to decide, with greater flexibility and scalability. + +2. Whether the workload of the member cluster only clears the `karmada.io/managed` label is enough? + +Logically, after the label `karmada.io/managed` is cleared, the relationship with karmada is broken. + +### Test Plan + +TODO + +## Alternatives + +### Extended by Annotation + +#### API changes + +A new Annotation is added for users to include on resource templates in the Karmada control plane, with the key value: `resourcetemplate.karmada.io/cascadedeletion`. To increase extensibility, the value is of the string enumeration type, and currently supported types include: +- orphan: Retain resources in member clusters and clean up labels/annotations and other information attached to member cluster resources by the Karmada system. + +When users do not specify this annotation, the system's current behavior is to synchronously delete resources in member clusters. + +#### Controller logic changes + +The `resourcetemplate.karmada.io/cascadedeletion` annotation added by users to the resource template will be propagated to `work.spec.workload.manifests`. When the resource template is deleted, the `execution-controller` will execute the logic for deleting the work object. It can parse the value of the `resourcetemplate.karmada.io/cascadedeletion` annotation from `work.spec.workload.manifests` and perform the following judgment logic: +- If the target annotation does not exist, synchronously delete the resources in the member clusters. 
+- If the target annotation value is `orphan`, retain the resources in the member clusters and clean up the labels/annotations and other information attached to the member cluster resources by the Karmada system. + +![resource-delete-policy](./statics/resource-delete-policy.png) + +#### User usage example + +Set the cascade deletion policy to orphan + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + propagationpolicy.karmada.io/name: foo + propagationpolicy.karmada.io/namespace: default + resourcetemplate.karmada.io/cascadedeletion: orphan + ... +``` + +In this approach, there is also a branch idea of adding a `CascadeDeletion` field in the Work API, so there is no need to parse `work.spec.workload.manifests`. + +Work +```go +// WorkSpec defines the desired state of Work. +type WorkSpec struct { + ... + + // CascadeDeletion Declare the cascade deletion strategy. The default value is null, which is equivalent to background. + // +optional + CascadeDeletion *CascadeDeletionPolicy `json:"cascadeDeletion,omitempty"` +} +``` + +The `binding-controller` needs to set the `CascadeDeletion` field in the Work object according the resource annotation. + +The `cluster-resource-binding-controller` needs to set the `CascadeDeletion` field in the Work object according the resource annotation. + +The `execution-controller` needs to perform resource deletion based on the `CascadeDeletion` field in Work. + +> Note: For namespace resources, the `namespace-sync-controller` in the Karmada system automatically propagates each new namespace created by users to member clusters, and the system achieves this functionality by directly generating work objects. For the scheme of adding new API fields in the work, the `namespace-sync-controller` needs to be responsible for processing that field. + +#### Advantages & Disadvantages + +Disadvantages: +- Using annotations as an API is somewhat informal. + +### Extended by adding a new CRD + +A new CRD resource is added, through which users define the CR (Custom Resource) of this CRD to describe the resource deletion strategy for the target resource. + +#### API changes + +```go +type CascadeDeletionPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec represents the desired cascadeDeletion Behavior. + Spec CascadeDeletionSpec `json:"spec"` + + // Status represents the status of cascadeDeletion. + // +optional + Status CascadeDeletionStatus `json:"status,omitempty"` +} + +type CascadeDeletionSpec struct { + // CascadeDeletion Declare the cascade deletion strategy. The default value is null, which is equivalent to background. + // +optional + CascadeDeletion *CascadeDeletionPolicy `json:"cascadeDeletion,omitempty"` + // ResourceSelectors used to select resources. + // Nil or empty selector is not allowed and doesn't mean match all kinds + // of resources for security concerns that sensitive resources(like Secret) + // might be accidentally propagated. + // +required + // +kubebuilder:validation:MinItems=1 + ResourceSelectors []ResourceSelector `json:"resourceSelectors"` +} + +// ResourceSelector the resources will be selected. +type ResourceSelector struct { + // APIVersion represents the API version of the target resources. + // +required + APIVersion string `json:"apiVersion"` + + // Kind represents the Kind of the target resources. + // +required + Kind string `json:"kind"` + + // Namespace of the target resource. + // Default is empty, which means inherit from the parent object scope. 
+ // +optional + Namespace string `json:"namespace,omitempty"` + + // Name of the target resource. + // Default is empty, which means selecting all resources. + // +optional + Name string `json:"name,omitempty"` + + // A label query over a set of resources. + // If name is not empty, labelSelector will be ignored. + // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` +} + +type CascadeDeletionStatus struct { + ... +} +``` + +Work +```go +// WorkSpec defines the desired state of Work. + type WorkSpec struct { + // CascadeDeletion Declare the cascade deletion strategy. The default value is null, which is equivalent to background. + // +optional + CascadeDeletion *CascadeDeletionPolicy `json:"cascadeDeletion,omitempty"` + + ... +} +``` + +#### Controller logic changes + +The `binding-controller`/`cluster-resource-binding-controller` checks for the existence of a `CascadeDeletionPolicy` associated with the target resource when creating or updating the Work object. If found, the deletion policy is synchronized into the Work object. + +The `execution-controller` carries out resource deletion based on the `CascadeDeletion` field in the Work object. + +#### User usage example + +Set the cascade deletion policy to orphan: + +```yaml +apiVersion: policy.karmada.io/v1alpha1 +kind: CascadeDeletionPolicy +metadata: + name: foo +spec: + cascadeDeletion: orphan + resourceSelectors: + - apiVersion: apps/v1 + kind: Deployment + name: foo + namespace: default +``` + +#### Advantages & Disadvantages + +Disadvantages: +- It increases the learning cost for users and results in an increased number of resources in the Karmada control plane. \ No newline at end of file diff --git a/docs/proposals/migration-rollback-protection/statics/resource-delete-policy.png b/docs/proposals/migration-rollback-protection/statics/resource-delete-policy.png new file mode 100644 index 000000000000..b9e1a5fd8f78 Binary files /dev/null and b/docs/proposals/migration-rollback-protection/statics/resource-delete-policy.png differ diff --git a/docs/proposals/structured-configuration/README.md b/docs/proposals/structured-configuration/README.md new file mode 100644 index 000000000000..969c016c1f7f --- /dev/null +++ b/docs/proposals/structured-configuration/README.md @@ -0,0 +1,543 @@ +--- +title: Structured configuration overrider +authors: +- "@Patrick0308" +- "@sophiefeifeifeiya" +reviewers: +- "@chaunceyjiang" +approvers: +- "@chaunceyjiang" +creation-date: 2024-08-12 +--- + +# Structured configuration overrider +## Summary +The proposal introduces a new feature that allows users to partially override values inside JSON and YAML fields. This is achieved using JSON patch operation. This design enables users to override the values within JSON/YAML fields partially, rather than replacing a whole JSON/YAML fields with `PlaintextOverrider`. Currently, `PlaintextOverrider` applies JSON patch operations to whole fields, rather than specific values within fields, making it unsuitable for cases where users need to override individual values within those fields. + +## Motivation +### Goals ++ Allow users to override specific values inside JSON and YAML in resources (e.g, configmap). ++ Support JSON patch (“add”, “remove”, “replace”) for both JSON and YAML. +### Non-Goals ++ Support all data formats, like XML. ++ Support every operation of YAML and JSON. 
+## Proposal +### User Stories (Optional) +#### Story 1 +As an administrator and developer, I want to update specific values within JSON/YAML in resources to without replacing the entire configuration, ensuring that my changes are minimal and targeted. +### Notes/Constraints/Caveats (Optional) +Illustrated in YAML Implementation. +### Risks and Mitigations +## Design Details +### API Change +```go +type Overriders struct { + ... + // FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + // This allows changing a single field within the resource with multiple operations. + // It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + // The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + // +optional + FieldOverrider []FieldOverrider `json:"fieldOverrider,omitempty"` +} + +type FieldOverrider struct { + // FieldPath specifies the initial location in the instance document where the operation should take place. + // The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml" + // specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml". + // +required + FieldPath string `json:"fieldPath"` + + // JSON represents the operations performed on the JSON document specified by the FieldPath. + // +optional + JSON []JSONPatchOperation `json:"json,omitempty"` + + // YAML represents the operations performed on the YAML document specified by the FieldPath. + // +optional + YAML []YAMLPatchOperation `json:"yaml,omitempty"` +} + +// JSONPatchOperation represents a single field modification operation for JSON format. +type JSONPatchOperation struct { + // SubPath specifies the relative location within the initial FieldPath where the operation should take place. + // The path uses RFC 6901 for navigating into nested structures. + // +required + SubPath string `json:"subPath"` + + // Operator indicates the operation on target field. + // Available operators are: "add", "remove", and "replace". + // +kubebuilder:validation:Enum=add;remove;replace + // +required + Operator OverriderOperator `json:"operator"` + + // Value is the new value to set for the specified field if the operation is "add" or "replace". + // For "remove" operation, this field is ignored. + // +optional + Value apiextensionsv1.JSON `json:"value,omitempty"` +} + +// YAMLPatchOperation represents a single field modification operation for YAML format. +type YAMLPatchOperation struct { + // SubPath specifies the relative location within the initial FieldPath where the operation should take place. + // The path uses RFC 6901 for navigating into nested structures. + // +required + SubPath string `json:"subPath"` + + // Operator indicates the operation on target field. + // Available operators are: "add", "remove", and "replace". + // +kubebuilder:validation:Enum=add;remove;replace + // +required + Operator OverriderOperator `json:"operator"` + + // Value is the new value to set for the specified field if the operation is "add" or "replace". + // For "remove" operation, this field is ignored. + // +optional + Value apiextensionsv1.JSON `json:"value,omitempty"` +} + +// OverriderOperator is the set of operators that can be used in an overrider. +type OverriderOperator string + +// These are valid overrider operators. 
+const ( + OverriderOpAdd OverriderOperator = "add" + OverriderOpRemove OverriderOperator = "remove" + OverriderOpReplace OverriderOperator = "replace" +) +``` + +### User usage example +For example, consider a ConfigMap with the following data in member cluster: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-config +data: + db-config.yaml: | + database: + host: localhost + port: 3306 +``` +The following is OverridePolicy which uses FieldOverrider: +```yaml +apiVersion: karmada.io/v1alpha1 +kind: OverridePolicy +metadata: + name: example-configmap-override +spec: + resourceSelectors: + - apiVersion: v1 + kind: ConfigMap + name: example-config + overrideRules: + - overriders: + fieldOverrider: + - fieldPath: /data/db-config.yaml + yaml: + - subPath: /database/host + operator: replace + value: "remote-db.example.com" + - subPath: /database/port + operator: replace + value: "3307" +``` +After we apply this policy, we can modify the db-config.yaml +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-config +data: + db-config.yaml: | + database: + host: remote-db.example.com + port: 3307 +``` + +### YAML Implementation + +We choose Plan1. +(1) Plan1 is easy to implement by converting YAML directly to JSON, while Plan2 has to encapsulate ytt's grammar as if it is JSON patch. +(2) Plan1 and Plan2 both have the same issues illustrated in the following. + +#### Plan 1: directly convert to and back from JSON (chosen) +If YAML is directly converted to JSON and then converted back using (‘sigs.k8s.io/yaml’), some data type information might be lost. +1. Dates and Times + ```yaml + dob: 1979-05-27T07:32:00Z + date_of_birth: 1979-05-27 + + dob: time.Time + date_of_birth: time.Time + + # after transformation + + dob: string + date_of_birth: string + ``` +2. Supports anchors (&) and aliases (*) to reference and reuse values + ```yaml + default: &default + name: Alice + age: 30 + employee1: + <<: *default + role: Developer + employee2: + <<: *default + role: Designer + + # after transformation + + default: + age: 30 + name: Alice + employee1: + age: 30 + name: Alice + role: Developer + employee2: + age: 30 + name: Alice + role: Designer + ``` + **Can be applied if:** + (1) Do not consider the situation described above + (2) Write cases to deal with different types **(large maintenance costs)** + +#### Plan 2: ytt (Aborted) +Use the third party libraries **ytt**, supporting the overlays https://carvel.dev/ytt/docs/v0.50.x/ytt-overlays/ to implement and json operation similar operations. Implementation has its own specific syntax for ytt. +Documents: https://carvel.dev/ytt/docs/v0.50.x/ytt-overlays/ +Joining methods: https://github.com/carvel-dev/ytt/blob/develop/examples/integrating-with-ytt/apis.md#as-a-go-module +After testing, it has the same problem as the **Plan 1**. +1. Dates and Times +```yaml +dob: 1979-05-27T07:32:00Z +date_of_birth: 1979-05-27 + +# after transformation + +dob: "1979-05-27T07:32:00Z" +date_of_birth: "1979-05-27" +``` + +2. 
Supports anchors (&) and aliases (*) to reference and reuse values +```yaml +default: &default + name: Alice + age: 30 +employee1: + <<: *default + role: Developer +employee2: + <<: *default + role: Designer + +# after transformation + +default: + name: Alice + age: 30 +employee1: + name: Alice + age: 30 + role: Developer +employee2: + name: Alice + age: 30 + role: Designer +number: 12345 +string: "6789" +``` +**Can be applied if:** +(1) Do not consider the situation described above +(2) Ability to maintain functionality despite incompatibilities arising from encapsulating ytt as JSON operations later on. + +### Test Plan +#### UT +- Add unit tests to cover the new functions. +#### E2E ++ Write proposal in `coverage_docs/overridepolicy_test.md` : deployment `FieldOverrider` testing; ++ Use ginkgo to complete the code `overridepolicy_test.go`. + +## Alternatives +There are three API designs to achieve this: +### (1) Each data format has different name with same struct +```go +type Overriders struct { + ... + // JSONPlaintextOverrider represents the rules dedicated to handling json object overrides + // +optional + JSONPlaintextOverrider []JSONPlaintextOverrider `json:"jsonPlaintextOverrider,omitempty"` + // YAMLPlaintextOverrider represents the rules dedicated to handling yaml object overrides + // +optional + YAMLPlaintextOverrider []YAMLPlaintextOverrider `json:"yamlPlaintextOverrider,omitempty"` +} + +type JSONPlaintextOverrider struct { + // Path indicates the path of target field + Path string `json:"path"` + // JSONPatch represents json patch rules defined with plaintext overriders. + Patch []Patch `json:"patch"` + // MergeValue t represents the object value to be merged into the object. + MergeValue apiextensionsv1.JSON `json:"mergeValue"` + // MergeRawValue represents the raw, original format data (e.g., YAML, JSON) to be merged into the object. + MergeRawValue string `json:"mergeRawValue"` +} + +type YAMLPlaintextOverrider struct { + // Path indicates the path of target field + Path string `json:"path"` + // JSONPatch represents json patch rules defined with plaintext overriders. + Patch []Patch `json:"patch"` + // MergeValue t represents the object value to be merged into the object. + MergeValue apiextensionsv1.JSON `json:"mergeValue"` + // MergeRawValue represents the raw, original format data (e.g., YAML, JSON) to be merged into the object. + MergeRawValue string `json:"mergeRawValue"` +} + +type Patch struct { + // Path indicates the path of target field + Path string `json:"path"` + // From indicates the path of original field when operator is move and copy + From string `json:"from"` + // Operator indicates the operation on target field. + // Available operators are: add, replace and remove. + // +kubebuilder:validation:Enum=add;remove;replace;move;test;copy + Operator PatchOperator `json:"operator"` + // Value to be applied to target field. + // Must be empty when operator is Remove. + // +optional + Value apiextensionsv1.JSON `json:"value,omitempty"` +} + +// PatchOperator is the set of operators that can be used in an overrider. +type PatchOperator string + +// These are valid patch operators. +const ( + PatchOpAdd PatchOperator = "add" + PatchOpRemove PatchOperator = "remove" + PatchOpReplace PatchOperator = "replace" + PatchOpMove PatchOperator = "move" + PatchOpTest PatchOperator = "test" + PatchOpCopy PatchOperator = "copy" +) +``` + +### (2) Each data format has different name with same struct + +```go +type Overriders struct { + ... 
+ // JSONPlaintextOverrider represents the rules dedicated to handling json object overrides + // +optional + JSONPlaintextOverrider []PlaintextObjectOverrider `json:"jsonPlaintextOverrider,omitempty"` + // YAMLPlaintextOverrider represents the rules dedicated to handling yaml object overrides + // +optional + YAMLPlaintextOverrider []PlaintextObjectOverrider `json:"yamlPlaintextOverrider,omitempty"` + // TOMLPlaintextOverrider represents the rules dedicated to handling toml object overrides + // +optional + TOMLPlaintextOverrider []PlaintextObjectOverrider `json:"tomlPlaintextOverrider,omitempty"` + // XMLPlaintextOverrider represents the rules dedicated to handling xml object overrides + // +optional + XMLPlaintextOverrider []PlaintextObjectOverrider `json:"xmlPlaintextOverrider,omitempty"` +} + +type PlaintextObjectOverrider struct { + // Path indicates the path of target field + Path string `json:"path"` + // JSONPatch represents json patch rules defined with plaintext overriders. + JSONPatch []JSONPatch `json:"jsonPatch"` + // MergeValue t represents the object value to be merged into the object. + MergeValue apiextensionsv1.JSON `json:"mergeValue"` + // MergeRawValue represents the raw, original format data (e.g., YAML, JSON) to be merged into the object. + MergeRawValue string `json:"mergeRawValue"` +} + +type JSONPatch struct { + // Path indicates the path of target field + Path string `json:"path"` + // From indicates the path of original field when operator is move and copy + From string `json:"from"` + // Operator indicates the operation on target field. + // Available operators are: add, replace and remove. + // +kubebuilder:validation:Enum=add;remove;replace;move;test;copy + Operator JSONPatchOperator `json:"operator"` + // Value to be applied to target field. + // Must be empty when operator is Remove. + // +optional + Value apiextensionsv1.JSON `json:"value,omitempty"` +} + +// PatchOperator is the set of operators that can be used in an overrider. +type JSONPatchOperator string + +// These are valid patch operators. +const ( + PatchOpAdd JSONPatchOperator = "add" + PatchOpRemove JSONPatchOperator = "remove" + PatchOpReplace JSONPatchOperator = "replace" + PatchOpMove JSONPatchOperator = "move" + PatchOpTest JSONPatchOperator = "test" + PatchOpCopy JSONPatchOperator = "copy" +) +``` + +### (3) Enumeration + +```go +type Overriders struct { + ... + // PlaintextObjectOverrider represents the rules dedicated to handling yaml object overrides + // +optional + PlaintextObjectOverrider []PlaintextObjectOverrider `json:"yamlPlaintextOverrider,omitempty"` +} + +type PlaintextObjectOverrider struct { + // Path indicates the path of target field + Path string `json:"path"` + // DataFormat indicates the type of data formats type to be modified + DataFormat DataFormat `json:"dataFormat"` + // JSONPatch represents json patch rules defined with plaintext overriders. + JSONPatch []JSONPatch `json:"jsonPatch"` + // MergeValue t represents the object value to be merged into the object. + MergeValue apiextensionsv1.JSON `json:"mergeValue"` + // MergeRawValue represents the raw, original format data (e.g., YAML, JSON) to be merged into the object. 
+ MergeRawValue string `json:"mergeRawValue"` +} + +type DataFormat string + +const ( + yaml DataFormat = "yaml" + json DataFormat = "json" + toml DataFormat = "toml" +) +``` + +### Analysis of 3 Implementations + +(1) Each data format has different struct (**Easiest to extend**) +json -> json * op -> json -> JSONPlaintextOverrider []JSONPlaintextOverrider +yaml -> yaml * op -> yaml -> YAMLPlaintextOverrider []YAMLPlaintextOverrider +xml -> xml * op -> xml -> XMLPlaintextOverrider []XMLPlaintextOverrider +... +This one is designed for **native operations/JSON operations** for each data format. +For example, json has 5 json operations, yaml has 3 yaml operations, and xml has 4 xml operations, ... +It should be 5+3+4+.. operations in total, which is a huge number. + +(2) Each data format has different name with same struct +json -> json * op -> json -> JSONPlaintextOverrider []PlaintextObjectOverrider +yaml -> yaml * op -> yaml -> YAMLPlaintextOverrider []PlaintextObjectOverrider +xml -> xml * op -> xml -> XMLPlaintextOverrider []PlaintextObjectOverrider +... +This one is designed for **JSON operations** for all data formats. +(3) enum +json -> json * op -> json -> enum +yaml -> json * op -> yaml -> enum +xml -> json * op -> xml -> enum +... +This one is designed for **JSON operations** for all data formats. + +### User usage example for (1) +For example, consider a ConfigMap with the following data in member cluster: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-configmap + namespace: default +data: + config.json: | + { + "keyA": "valueA", + "keyB": "valueB", + "keyC": "valueC", + "keyD": "valueD", + "keyE": "valueE", + "keyF": "valueF" + } +``` + +The following is OverridePolicy which uses JSONPlaintextOverrider: + +```yaml +apiVersion: policy.karmada.io/v1alpha1 +kind: OverridePolicy +metadata: + name: example-override + namespace: default +spec: + targetCluster: + clusterNames: + - member1 + resourceSelectors: + - apiVersion: v1 + kind: ConfigMap + name: example-configmap + namespace: default + overrideRules: + - overriders: + jsonPlaintextOverrider: + - path: /data/config.json + patch: + - path: /keyA + operator: test + value: "valueA" + - path: /keyD + operator: add + value: "" + - path: /keyB + operator: remove + - path: /keyC + operator: replace + value: "newly added value" + - from: /keyD + path: /keyF + operator: move + - from: /keyE + path: /keyG + operator: copy + mergeValue: + { + "keyH": "valueH", + "keyI": "valueI" + } + mergeRawValue: '{"keyJ": "valueJ","keyK": "valueK"}' +``` + +After we apply this policy, we can modify the config.json + +1. Test: The operation checks if keyA has the value `valueA`. Since it does, the operation proceeds. +2. Add: Adds an empty string as the value for keyD. +3. Remove: Removes keyB and its value. +4. Replace: Replaces the value of keyC with `newly added value`. +5. Move: Moves the value of keyD to keyF, which effectively deletes keyD and sets the value of keyF to `valueD`. +6. Copy: Copies the value of keyE to keyG. +7. Merge: Adds new keys keyH and keyI with values `valueH` and `valueI`. +8. Merge Raw Value: Adds keys keyJ and keyK with values `valueJ` and `valueK`. + Finally, we get a new config.json with JSON operation. 
+ +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-configmap + namespace: default +data: + config.json: | + { + "keyA": "valueA", + "keyC": "newly added value", + "keyE": "valueE", + "keyF": "valueD", + "keyG": "valueE", + "keyH": "valueH", + "keyI": "valueI", + "keyJ": "valueJ", + "keyK": "valueK" + } +``` \ No newline at end of file diff --git a/go.mod b/go.mod index ac69a20ecd88..550fe9d0a37e 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/emirpasic/gods v1.18.1 github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-co-op/gocron v1.30.1 + github.com/go-openapi/jsonpointer v0.20.2 github.com/gogo/protobuf v1.3.2 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.5.0 @@ -90,7 +91,6 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.22.7 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect diff --git a/hack/local-up-karmada.sh b/hack/local-up-karmada.sh index 991ccb290b6e..5b6521bd2709 100755 --- a/hack/local-up-karmada.sh +++ b/hack/local-up-karmada.sh @@ -61,6 +61,7 @@ MEMBER_CLUSTER_1_TMP_CONFIG="${KUBECONFIG_PATH}/${MEMBER_TMP_CONFIG_PREFIX}-${ME MEMBER_CLUSTER_2_TMP_CONFIG="${KUBECONFIG_PATH}/${MEMBER_TMP_CONFIG_PREFIX}-${MEMBER_CLUSTER_2_NAME}.config" PULL_MODE_CLUSTER_TMP_CONFIG="${KUBECONFIG_PATH}/${MEMBER_TMP_CONFIG_PREFIX}-${PULL_MODE_CLUSTER_NAME}.config" HOST_IPADDRESS=${1:-} +BUILD_FROM_SOURCE=${BUILD_FROM_SOURCE:-"true"} CLUSTER_VERSION=${CLUSTER_VERSION:-"${DEFAULT_CLUSTER_VERSION}"} KIND_LOG_FILE=${KIND_LOG_FILE:-"/tmp/karmada"} @@ -133,11 +134,13 @@ util::create_cluster "${PULL_MODE_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_TMP_CONFIG #step2. make images and get karmadactl export VERSION="latest" export REGISTRY="docker.io/karmada" -export KARMADA_IMAGE_LABEL_VALUE="May_be_pruned_in_local-up-karmada.sh" -export DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS:-} --label=image.karmada.io=${KARMADA_IMAGE_LABEL_VALUE}" -make images GOOS="linux" --directory="${REPO_ROOT}" -#clean up dangling images -docker image prune --force --filter "label=image.karmada.io=${KARMADA_IMAGE_LABEL_VALUE}" +if [[ "${BUILD_FROM_SOURCE}" == "true" ]]; then + export KARMADA_IMAGE_LABEL_VALUE="May_be_pruned_in_local-up-karmada.sh" + export DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS:-} --label=image.karmada.io=${KARMADA_IMAGE_LABEL_VALUE}" + make images GOOS="linux" --directory="${REPO_ROOT}" + #clean up dangling images + docker image prune --force --filter "label=image.karmada.io=${KARMADA_IMAGE_LABEL_VALUE}" +fi GO111MODULE=on go install "github.com/karmada-io/karmada/cmd/karmadactl" GOPATH=$(go env GOPATH | awk -F ':' '{print $1}') @@ -148,14 +151,16 @@ echo "Waiting for the host clusters to be ready..." util::check_clusters_ready "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" #step4. 
load components images to kind cluster -kind load docker-image "${REGISTRY}/karmada-controller-manager:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-scheduler:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-descheduler:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-webhook:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-scheduler-estimator:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-aggregated-apiserver:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-search:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-metrics-adapter:${VERSION}" --name="${HOST_CLUSTER_NAME}" +if [[ "${BUILD_FROM_SOURCE}" == "true" ]]; then + kind load docker-image "${REGISTRY}/karmada-controller-manager:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-scheduler:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-descheduler:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-webhook:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-scheduler-estimator:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-aggregated-apiserver:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-search:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-metrics-adapter:${VERSION}" --name="${HOST_CLUSTER_NAME}" +fi #step5. install karmada control plane components "${REPO_ROOT}"/hack/deploy-karmada.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" @@ -185,7 +190,9 @@ ${KARMADACTL_BIN} join --karmada-context="${KARMADA_APISERVER_CLUSTER_NAME}" ${M # wait until the pull mode cluster ready util::check_clusters_ready "${PULL_MODE_CLUSTER_TMP_CONFIG}" "${PULL_MODE_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-agent:${VERSION}" --name="${PULL_MODE_CLUSTER_NAME}" +if [[ "${BUILD_FROM_SOURCE}" == "true" ]]; then + kind load docker-image "${REGISTRY}/karmada-agent:${VERSION}" --name="${PULL_MODE_CLUSTER_NAME}" +fi #step7. deploy karmada agent in pull mode member clusters "${REPO_ROOT}"/hack/deploy-agent-and-estimator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" "${MAIN_KUBECONFIG}" "${KARMADA_APISERVER_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_TMP_CONFIG}" "${PULL_MODE_CLUSTER_NAME}" diff --git a/operator/README.md b/operator/README.md index d579a32a642d..b0d9191d6955 100644 --- a/operator/README.md +++ b/operator/README.md @@ -23,11 +23,12 @@ This section describes how to install `karmada-operator` and create a Karmada in #### Helm install -Go to the root directory of the `karmada-io/karmada` repo. To install the Helm Chart with -the release name `karmada-operator` in the namespace `karmada-system`, simply run the helm command: +Go to the root directory of the `karmada-io/karmada` repo. Before installing the Helm Chart, ensure the `karmada-operator` is set to the preferred released version. You can check the [latest tag on GitHub releases](https://github.com/karmada-io/karmada/releases/latest). 
+ +To install the Helm Chart with the release name `karmada-operator` in the namespace `karmada-system`, run the following command, replacing `${preferred-released-version}` with the desired version: ```shell -helm install karmada-operator -n karmada-system --create-namespace --dependency-update ./charts/karmada-operator --debug +helm install karmada-operator -n karmada-system --create-namespace --dependency-update ./charts/karmada-operator --set operator.image.tag=${preferred-released-version} --debug ``` #### Using YAML resource diff --git a/operator/pkg/controlplane/controlplane_test.go b/operator/pkg/controlplane/controlplane_test.go new file mode 100644 index 000000000000..4ccd14df9a04 --- /dev/null +++ b/operator/pkg/controlplane/controlplane_test.go @@ -0,0 +1,454 @@ +/* +Copyright 2024 The Karmada Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "fmt" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + "k8s.io/utils/ptr" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/util" +) + +func TestEnsureAllControlPlaneComponents(t *testing.T) { + var replicas int32 = 2 + name, namespace := "karmada-demo", "test" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KarmadaComponents{ + KubeControllerManager: &operatorv1alpha1.KubeControllerManager{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: "registry.k8s.io/kube-controller-manager", + ImageTag: "latest", + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + }, + KarmadaControllerManager: &operatorv1alpha1.KarmadaControllerManager{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: "docker.io/karmada/karmada-controller-manager", + ImageTag: "latest", + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + }, + KarmadaScheduler: &operatorv1alpha1.KarmadaScheduler{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: "docker.io/karmada/karmada-scheduler", + ImageTag: "latest", + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + }, + KarmadaDescheduler: &operatorv1alpha1.KarmadaDescheduler{ + CommonSettings: 
operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: "docker.io/karmada/karmada-descheduler", + ImageTag: "latest", + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + }, + } + + fakeClient := fakeclientset.NewSimpleClientset() + + components := []string{ + constants.KubeControllerManagerComponent, + constants.KarmadaControllerManagerComponent, + constants.KarmadaSchedulerComponent, + constants.KarmadaDeschedulerComponent, + } + + for _, component := range components { + err := EnsureControlPlaneComponent(component, name, namespace, map[string]bool{}, fakeClient, cfg) + if err != nil { + t.Fatalf("failed to ensure %s controlplane component: %v", component, err) + } + } + + actions := fakeClient.Actions() + if len(actions) != len(components) { + t.Fatalf("expected %d actions, but got %d", len(components), len(actions)) + } + + for _, action := range actions { + createAction, ok := action.(coretesting.CreateAction) + if !ok { + t.Errorf("expected CreateAction, but got %T", action) + } + + if createAction.GetResource().Resource != "deployments" { + t.Errorf("expected action on 'deployments', but got '%s'", createAction.GetResource().Resource) + } + } +} + +func TestGetKubeControllerManagerManifest(t *testing.T) { + var replicas int32 = 2 + name, namespace := "karmada-demo", "test" + image, imageTag := "registry.k8s.io/kube-controller-manager", "latest" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KubeControllerManager{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + } + + deployment, err := getKubeControllerManagerManifest(name, namespace, cfg) + if err != nil { + t.Fatalf("failed to get kube controller manager manifest: %v", err) + } + + deployment, _, err = verifyDeploymentDetails( + deployment, replicas, imagePullPolicy, extraArgs, namespace, + image, imageTag, util.KubeControllerManagerName(name), + ) + if err != nil { + t.Errorf("failed to verify kube controller manager deployment details: %v", err) + } + + expectedSecrets := []string{ + util.AdminKubeconfigSecretName(name), + util.KarmadaCertSecretName(name), + } + err = verifySecrets(deployment, expectedSecrets) + if err != nil { + t.Errorf("failed to verify kube controller manager secrets: %v", err) + } +} + +func TestGetKarmadaControllerManagerManifest(t *testing.T) { + var replicas int32 = 2 + name, namespace := "karmada-demo", "test" + image, imageTag := "docker.io/karmada/karmada-controller-manager", "latest" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KarmadaControllerManager{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: 
ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + } + + featureGates := map[string]bool{"FeatureA": true} + + deployment, err := getKarmadaControllerManagerManifest(name, namespace, featureGates, cfg) + if err != nil { + t.Fatalf("failed to get karmada controller manager manifest: %v", err) + } + + deployment, container, err := verifyDeploymentDetails( + deployment, replicas, imagePullPolicy, extraArgs, namespace, + image, imageTag, util.KarmadaControllerManagerName(name), + ) + if err != nil { + t.Errorf("failed to verify karmada controller manager deployment details: %v", err) + } + + err = verifyFeatureGates(container, featureGates) + if err != nil { + t.Errorf("failed to verify karmada controller manager feature gates: %v", err) + } + + err = verifySystemNamespace(container) + if err != nil { + t.Errorf("failed to verify karmada controller manager system namespace: %v", err) + } + + expectedSecrets := []string{util.AdminKubeconfigSecretName(name)} + err = verifySecrets(deployment, expectedSecrets) + if err != nil { + t.Errorf("failed to verify karmada controller manager secrets: %v", err) + } +} + +func TestGetKarmadaSchedulerManifest(t *testing.T) { + var replicas int32 = 2 + name, namespace := "karmada-demo", "test" + image, imageTag := "docker.io/karmada/karmada-scheduler", "latest" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KarmadaScheduler{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + } + + featureGates := map[string]bool{"FeatureA": true} + + deployment, err := getKarmadaSchedulerManifest(name, namespace, featureGates, cfg) + if err != nil { + t.Fatalf("failed to get karmada scheduler manifest: %v", err) + } + + deployment, container, err := verifyDeploymentDetails( + deployment, replicas, imagePullPolicy, extraArgs, namespace, + image, imageTag, util.KarmadaSchedulerName(name), + ) + if err != nil { + t.Errorf("failed to verify karmada scheduler deployment details: %v", err) + } + + err = verifyFeatureGates(container, featureGates) + if err != nil { + t.Errorf("failed to verify karmada scheduler feature gates: %v", err) + } + + err = verifySystemNamespace(container) + if err != nil { + t.Errorf("failed to verify karmada scheduler system namespace: %v", err) + } + + expectedSecrets := []string{ + util.AdminKubeconfigSecretName(name), + util.KarmadaCertSecretName(name), + } + err = verifySecrets(deployment, expectedSecrets) + if err != nil { + t.Errorf("failed to verify karmada scheduler secrets: %v", err) + } +} + +func TestGetKarmadaDeschedulerManifest(t *testing.T) { + var replicas int32 = 2 + name, namespace := "karmada-demo", "test" + image, imageTag := "docker.io/karmada/karmada-descheduler", "latest" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KarmadaDescheduler{ + 
CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + } + + featureGates := map[string]bool{"FeatureA": true} + + deployment, err := getKarmadaDeschedulerManifest(name, namespace, featureGates, cfg) + if err != nil { + t.Fatalf("failed to get karmada descheduler manifest: %v", err) + } + + deployment, container, err := verifyDeploymentDetails( + deployment, replicas, imagePullPolicy, extraArgs, namespace, + image, imageTag, util.KarmadaDeschedulerName(name), + ) + if err != nil { + t.Errorf("failed to verify karmada descheduler deployment details: %v", err) + } + + err = verifyFeatureGates(container, featureGates) + if err != nil { + t.Errorf("failed to verify karmada descheduler feature gates: %v", err) + } + + err = verifySystemNamespace(container) + if err != nil { + t.Errorf("failed to verify karmada descheduler system namespace: %v", err) + } + + expectedSecrets := []string{ + util.AdminKubeconfigSecretName(name), + util.KarmadaCertSecretName(name), + } + err = verifySecrets(deployment, expectedSecrets) + if err != nil { + t.Errorf("failed to verify karmada descheduler secrets: %v", err) + } +} + +// verifyDeploymentDetails ensures that the specified deployment contains the +// correct configuration for replicas, image pull policy, extra args, and image. +// It validates that the deployment matches the expected Karmada Controlplane settings. +// It could be against Kube Controller Manager, Karmada Controller Manager, Karmada Scheduler, +// and Karmada Descheduler. +func verifyDeploymentDetails(deployment *appsv1.Deployment, replicas int32, imagePullPolicy corev1.PullPolicy, extraArgs map[string]string, namespace, image, imageTag, expectedDeploymentName string) (*appsv1.Deployment, *corev1.Container, error) { + if deployment.Name != expectedDeploymentName { + return nil, nil, fmt.Errorf("expected deployment name '%s', but got '%s'", expectedDeploymentName, deployment.Name) + } + + if deployment.Namespace != namespace { + return nil, nil, fmt.Errorf("expected deployment namespace '%s', but got '%s'", namespace, deployment.Namespace) + } + + if _, exists := deployment.Annotations["annotationKey"]; !exists { + return nil, nil, fmt.Errorf("expected annotation with key 'annotationKey' and value 'annotationValue', but it was missing") + } + + if _, exists := deployment.Labels["labelKey"]; !exists { + return nil, nil, fmt.Errorf("expected label with key 'labelKey' and value 'labelValue', but it was missing") + } + + if deployment.Spec.Replicas == nil || *deployment.Spec.Replicas != replicas { + return nil, nil, fmt.Errorf("expected replicas to be %d, but got %d", replicas, deployment.Spec.Replicas) + } + + containers := deployment.Spec.Template.Spec.Containers + if len(containers) != 1 { + return nil, nil, fmt.Errorf("expected exactly 1 container, but got %d", len(containers)) + } + + expectedImage := fmt.Sprintf("%s:%s", image, imageTag) + container := containers[0] + if container.Image != expectedImage { + return nil, nil, fmt.Errorf("expected container image '%s', but got '%s'", expectedImage, container.Image) + } + + if container.ImagePullPolicy != imagePullPolicy { + return nil, nil, fmt.Errorf("expected image pull policy '%s', but got '%s'", imagePullPolicy, container.ImagePullPolicy) + } + + err := 
verifyExtraArgs(&container, extraArgs) + if err != nil { + return nil, nil, fmt.Errorf("failed to verify extra args: %v", err) + } + + return deployment, &container, nil +} + +// verifySystemNamespace validates that expected system namespace is present in the container commands. +func verifySystemNamespace(container *corev1.Container) error { + leaderElectResourceSystemNamespaceArg := fmt.Sprintf("--leader-elect-resource-namespace=%s", constants.KarmadaSystemNamespace) + if !contains(container.Command, leaderElectResourceSystemNamespaceArg) { + return fmt.Errorf("leader elect resource namespace argument '%s' not found in container command with value %s", leaderElectResourceSystemNamespaceArg, constants.KarmadaSystemNamespace) + } + return nil +} + +// verifySecrets validates that the expected secrets are present in the Deployment's volumes. +func verifySecrets(deployment *appsv1.Deployment, expectedSecrets []string) error { + var extractedSecrets []string + for _, volume := range deployment.Spec.Template.Spec.Volumes { + extractedSecrets = append(extractedSecrets, volume.Secret.SecretName) + } + for _, expectedSecret := range expectedSecrets { + if !contains(extractedSecrets, expectedSecret) { + return fmt.Errorf("expected secret '%s' not found in extracted secrets", expectedSecret) + } + } + return nil +} + +// verifyExtraArgs checks that the container command includes the extra arguments. +func verifyExtraArgs(container *corev1.Container, extraArgs map[string]string) error { + for key, value := range extraArgs { + expectedArg := fmt.Sprintf("--%s=%s", key, value) + if !contains(container.Command, expectedArg) { + return fmt.Errorf("expected container commands to include '%s', but it was missing", expectedArg) + } + } + return nil +} + +// verifyFeatureGates ensures the container's command includes the specified feature gates. +func verifyFeatureGates(container *corev1.Container, featureGates map[string]bool) error { + var featureGatesArg string + for key, value := range featureGates { + featureGatesArg += fmt.Sprintf("%s=%t,", key, value) + } + featureGatesArg = fmt.Sprintf("--feature-gates=%s", featureGatesArg[:len(featureGatesArg)-1]) + if !contains(container.Command, featureGatesArg) { + return fmt.Errorf("expected container commands to include '%s', but it was missing", featureGatesArg) + } + return nil +} + +// contains check if a slice contains a specific string. 
+func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} diff --git a/operator/pkg/controlplane/manifests.go b/operator/pkg/controlplane/manifests.go index 1c53bc514b57..07b49a8a2434 100644 --- a/operator/pkg/controlplane/manifests.go +++ b/operator/pkg/controlplane/manifests.go @@ -188,7 +188,7 @@ spec: command: - /bin/karmada-scheduler - --kubeconfig=/etc/karmada/kubeconfig - - --metrics-bind-address=0.0.0.0:10351 + - --metrics-bind-address=0.0.0.0:8080 - --health-probe-bind-address=0.0.0.0:10351 - --enable-scheduler-estimator=true - --leader-elect-resource-namespace={{ .SystemNamespace }} @@ -206,7 +206,7 @@ spec: periodSeconds: 15 timeoutSeconds: 5 ports: - - containerPort: 10351 + - containerPort: 8080 name: metrics protocol: TCP volumeMounts: @@ -256,7 +256,7 @@ spec: command: - /bin/karmada-descheduler - --kubeconfig=/etc/karmada/kubeconfig - - --metrics-bind-address=0.0.0.0:10358 + - --metrics-bind-address=0.0.0.0:8080 - --health-probe-bind-address=0.0.0.0:10358 - --leader-elect-resource-namespace={{ .SystemNamespace }} - --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt @@ -273,7 +273,7 @@ spec: periodSeconds: 15 timeoutSeconds: 5 ports: - - containerPort: 10358 + - containerPort: 8080 name: metrics protocol: TCP volumeMounts: diff --git a/operator/pkg/controlplane/webhook/webhook_test.go b/operator/pkg/controlplane/webhook/webhook_test.go new file mode 100644 index 000000000000..8d009e3780ef --- /dev/null +++ b/operator/pkg/controlplane/webhook/webhook_test.go @@ -0,0 +1,259 @@ +/* +Copyright 2024 The Karmada Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "fmt" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + "k8s.io/utils/ptr" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/util" +) + +func TestEnsureKarmadaWebhook(t *testing.T) { + var replicas int32 = 2 + image, imageTag := "docker.io/karmada/karmada-webhook", "latest" + name := "karmada-demo" + namespace := "test" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KarmadaWebhook{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + } + + // Create fake clientset. 
+ fakeClient := fakeclientset.NewSimpleClientset() + + err := EnsureKarmadaWebhook(fakeClient, cfg, name, namespace, map[string]bool{}) + if err != nil { + t.Fatalf("failed to ensure karmada webhook, but got: %v", err) + } + + actions := fakeClient.Actions() + if len(actions) != 2 { + t.Fatalf("expected 2 actions, but got %d", len(actions)) + } +} + +func TestInstallKarmadaWebhook(t *testing.T) { + var replicas int32 = 2 + image, imageTag := "docker.io/karmada/karmada-webhook", "latest" + name := "karmada-demo" + namespace := "test" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KarmadaWebhook{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + } + + // Create fake clientset. + fakeClient := fakeclientset.NewSimpleClientset() + + err := installKarmadaWebhook(fakeClient, cfg, name, namespace, map[string]bool{}) + if err != nil { + t.Fatalf("failed to install karmada webhook: %v", err) + } + + err = verifyDeploymentCreation(fakeClient, replicas, imagePullPolicy, extraArgs, name, namespace, image, imageTag) + if err != nil { + t.Fatalf("failed to verify karmada webhook deployment creation: %v", err) + } +} + +func TestCreateKarmadaWebhookService(t *testing.T) { + // Define inputs. + name := "karmada-demo" + namespace := "test" + + // Initialize fake clientset. + client := fakeclientset.NewSimpleClientset() + + err := createKarmadaWebhookService(client, name, namespace) + if err != nil { + t.Fatalf("failed to create karmada webhook service") + } + + // Ensure the expected action (service creation) occurred. + actions := client.Actions() + if len(actions) != 1 { + t.Fatalf("expected 1 actions, but got %d actions", len(actions)) + } + + // Validate the action is a CreateAction and it's for the correct resource (Service). + createAction, ok := actions[0].(coretesting.CreateAction) + if !ok { + t.Fatalf("expected CreateAction, but got %T", actions[0]) + } + + if createAction.GetResource().Resource != "services" { + t.Fatalf("expected action on 'services', but got '%s'", createAction.GetResource().Resource) + } + + // Validate the created service object. + service := createAction.GetObject().(*corev1.Service) + expectedServiceName := util.KarmadaWebhookName(name) + if service.Name != expectedServiceName { + t.Fatalf("expected service name '%s', but got '%s'", expectedServiceName, service.Name) + } + + if service.Namespace != namespace { + t.Fatalf("expected service namespace '%s', but got '%s'", namespace, service.Namespace) + } +} + +// verifyDeploymentCreation validates the details of a Deployment against the expected parameters. +func verifyDeploymentCreation(client *fakeclientset.Clientset, replicas int32, imagePullPolicy corev1.PullPolicy, extraArgs map[string]string, name, namespace, image, imageTag string) error { + // Assert that a Deployment was created. + actions := client.Actions() + if len(actions) != 1 { + return fmt.Errorf("expected exactly 1 action either create or update, but got %d actions", len(actions)) + } + + // Check that the action was a Deployment creation. 
+ createAction, ok := actions[0].(coretesting.CreateAction) + if !ok { + return fmt.Errorf("expected a CreateAction, but got %T", actions[0]) + } + + if createAction.GetResource().Resource != "deployments" { + return fmt.Errorf("expected action on 'deployments', but got '%s'", createAction.GetResource().Resource) + } + + deployment := createAction.GetObject().(*appsv1.Deployment) + return verifyDeploymentDetails(deployment, replicas, imagePullPolicy, extraArgs, name, namespace, image, imageTag) +} + +// verifyDeploymentDetails validates the details of a Deployment against the expected parameters. +func verifyDeploymentDetails(deployment *appsv1.Deployment, replicas int32, imagePullPolicy corev1.PullPolicy, extraArgs map[string]string, name, namespace, image, imageTag string) error { + expectedDeploymentName := util.KarmadaWebhookName(name) + if deployment.Name != expectedDeploymentName { + return fmt.Errorf("expected deployment name '%s', but got '%s'", expectedDeploymentName, deployment.Name) + } + + if deployment.Namespace != namespace { + return fmt.Errorf("expected deployment namespace '%s', but got '%s'", namespace, deployment.Namespace) + } + + if _, exists := deployment.Annotations["annotationKey"]; !exists { + return fmt.Errorf("expected annotation with key 'annotationKey' and value 'annotationValue', but it was missing") + } + + if _, exists := deployment.Labels["labelKey"]; !exists { + return fmt.Errorf("expected label with key 'labelKey' and value 'labelValue', but it was missing") + } + + if deployment.Spec.Replicas == nil || *deployment.Spec.Replicas != replicas { + return fmt.Errorf("expected replicas to be %d, but got %d", replicas, deployment.Spec.Replicas) + } + + containers := deployment.Spec.Template.Spec.Containers + if len(containers) != 1 { + return fmt.Errorf("expected exactly 1 container, but got %d", len(containers)) + } + + expectedImage := fmt.Sprintf("%s:%s", image, imageTag) + container := containers[0] + if container.Image != expectedImage { + return fmt.Errorf("expected container image '%s', but got '%s'", expectedImage, container.Image) + } + + if container.ImagePullPolicy != imagePullPolicy { + return fmt.Errorf("expected image pull policy '%s', but got '%s'", imagePullPolicy, container.ImagePullPolicy) + } + + err := verifyExtraArgs(&container, extraArgs) + if err != nil { + return fmt.Errorf("failed to verify extra args: %v", err) + } + + err = verifySecrets(deployment, name) + if err != nil { + return fmt.Errorf("failed to verify secrets: %v", err) + } + + return nil +} + +// verifySecrets validates that the expected secrets are present in the Deployment's volumes. +func verifySecrets(deployment *appsv1.Deployment, name string) error { + var extractedSecrets []string + for _, volume := range deployment.Spec.Template.Spec.Volumes { + extractedSecrets = append(extractedSecrets, volume.Secret.SecretName) + } + expectedSecrets := []string{ + util.AdminKubeconfigSecretName(name), + util.WebhookCertSecretName(name), + } + for _, expectedSecret := range expectedSecrets { + if !contains(extractedSecrets, expectedSecret) { + return fmt.Errorf("expected secret '%s' not found in extracted secrets", expectedSecret) + } + } + return nil +} + +// verifyExtraArgs checks that the container command includes the extra arguments. 
+func verifyExtraArgs(container *corev1.Container, extraArgs map[string]string) error { + for key, value := range extraArgs { + expectedArg := fmt.Sprintf("--%s=%s", key, value) + if !contains(container.Command, expectedArg) { + return fmt.Errorf("expected container commands to include '%s', but it was missing", expectedArg) + } + } + return nil +} + +// contains check if a slice contains a specific string. +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} diff --git a/operator/pkg/karmadaresource/rbac/manifest.go b/operator/pkg/karmadaresource/rbac/manifest.go index 66eff933c37d..4c25dca6c8fc 100644 --- a/operator/pkg/karmadaresource/rbac/manifest.go +++ b/operator/pkg/karmadaresource/rbac/manifest.go @@ -166,5 +166,33 @@ rules: - deletecollection - patch - update +` + // ClusterProxyAdminClusterRole role to proxy member clusters + ClusterProxyAdminClusterRole = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cluster-proxy-admin +rules: +- apiGroups: + - 'cluster.karmada.io' + resources: + - clusters/proxy + verbs: + - '*' +` + // ClusterProxyAdminClusterRoleBinding authorize system:admin to proxy member clusters + ClusterProxyAdminClusterRoleBinding = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cluster-proxy-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-proxy-admin +subjects: + - kind: User + name: "system:admin" ` ) diff --git a/operator/pkg/karmadaresource/rbac/rbac.go b/operator/pkg/karmadaresource/rbac/rbac.go index 2e82b382e79f..3ca2d18666a7 100644 --- a/operator/pkg/karmadaresource/rbac/rbac.go +++ b/operator/pkg/karmadaresource/rbac/rbac.go @@ -30,25 +30,46 @@ import ( // EnsureKarmadaRBAC create karmada resource view and edit clusterrole func EnsureKarmadaRBAC(client clientset.Interface) error { - if err := grantKarmadaResourceViewClusterrole(client); err != nil { + if err := grantClusterProxyAdminRBAC(client); err != nil { return err } - return grantKarmadaResourceEditClusterrole(client) + if err := grantKarmadaResourceViewClusterRole(client); err != nil { + return err + } + return grantKarmadaResourceEditClusterRole(client) +} + +func grantClusterProxyAdminRBAC(client clientset.Interface) error { + role := &rbacv1.ClusterRole{} + if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), []byte(ClusterProxyAdminClusterRole), role); err != nil { + return fmt.Errorf("err when decoding ClusterProxyAdmin ClusterRole: %w", err) + } + util.MergeLabel(role, util.KarmadaSystemLabel, util.KarmadaSystemLabelValue) + if err := apiclient.CreateOrUpdateClusterRole(client, role); err != nil { + return fmt.Errorf("failed to create or update ClusterRole: %w", err) + } + + roleBinding := &rbacv1.ClusterRoleBinding{} + if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), []byte(ClusterProxyAdminClusterRoleBinding), roleBinding); err != nil { + return fmt.Errorf("err when decoding ClusterProxyAdmin ClusterRoleBinding: %w", err) + } + util.MergeLabel(role, util.KarmadaSystemLabel, util.KarmadaSystemLabelValue) + return apiclient.CreateOrUpdateClusterRoleBinding(client, roleBinding) } -func grantKarmadaResourceViewClusterrole(client clientset.Interface) error { +func grantKarmadaResourceViewClusterRole(client clientset.Interface) error { role := &rbacv1.ClusterRole{} if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), 
[]byte(KarmadaResourceViewClusterRole), role); err != nil { - return fmt.Errorf("err when decoding Karmada view Clusterrole: %w", err) + return fmt.Errorf("err when decoding Karmada view ClusterRole: %w", err) } util.MergeLabel(role, util.KarmadaSystemLabel, util.KarmadaSystemLabelValue) return apiclient.CreateOrUpdateClusterRole(client, role) } -func grantKarmadaResourceEditClusterrole(client clientset.Interface) error { +func grantKarmadaResourceEditClusterRole(client clientset.Interface) error { role := &rbacv1.ClusterRole{} if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), []byte(KarmadaResourceEditClusterRole), role); err != nil { - return fmt.Errorf("err when decoding Karmada edit Clusterrole: %w", err) + return fmt.Errorf("err when decoding Karmada edit ClusterRole: %w", err) } util.MergeLabel(role, util.KarmadaSystemLabel, util.KarmadaSystemLabelValue) return apiclient.CreateOrUpdateClusterRole(client, role) diff --git a/operator/pkg/karmadaresource/webhookconfiguration/webhookconfiguration_test.go b/operator/pkg/karmadaresource/webhookconfiguration/webhookconfiguration_test.go new file mode 100644 index 000000000000..bfaafbd11cc3 --- /dev/null +++ b/operator/pkg/karmadaresource/webhookconfiguration/webhookconfiguration_test.go @@ -0,0 +1,130 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhookconfiguration + +import ( + "encoding/base64" + "fmt" + "strings" + "testing" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + + "github.com/karmada-io/karmada/operator/pkg/util" +) + +func TestEnsureWebhookConfiguration(t *testing.T) { + name, namespace := "karmada-demo", "test" + + // Base64 encoding of the dummy certificate data. + caTestData := "test-ca-data" + caBundle := base64.StdEncoding.EncodeToString([]byte(caTestData)) + + fakeClient := fakeclientset.NewSimpleClientset() + err := EnsureWebhookConfiguration(fakeClient, namespace, name, caBundle) + if err != nil { + t.Fatalf("failed to create karmada mutating webhook configuration and validation webhook configuration: %v", err) + } + + // Ensure the expected action (mutating webhook configuration and validating webhook configuration creation) occurred. + actions := fakeClient.Actions() + if len(actions) != 2 { + t.Fatalf("expected 2 actions, but got %d", len(actions)) + } +} + +func TestMutatingWebhookConfiguration(t *testing.T) { + name, namespace := "karmada-demo", "test" + + // Base64 encoding of the dummy certificate data. + caTestData := "test-ca-data" + caBundle := base64.StdEncoding.EncodeToString([]byte(caTestData)) + + fakeClient := fakeclientset.NewSimpleClientset() + err := mutatingWebhookConfiguration(fakeClient, namespace, name, caBundle) + if err != nil { + t.Fatalf("error creating the mutating webhook configuration: %v", err) + } + + // Ensure the expected action (mutating webhook configuration creation) occurred on the fake clientset. 
+ actions := fakeClient.Actions() + if len(actions) != 1 { + t.Fatalf("expected 1 action, but got %d", len(actions)) + } + + // Validate the action is a CreateAction and it's for the correct resource (MutatingWebhookConfiguration). + createAction, ok := actions[0].(coretesting.CreateAction) + if !ok { + t.Fatalf("expected CreateAction, but got %T", actions[0]) + } + + // Validate the created MutatingWebhookConfiguration object. + mutatingWebhookConfig := createAction.GetObject().(*admissionregistrationv1.MutatingWebhookConfiguration) + serviceName := util.KarmadaWebhookName(name) + for _, webhook := range mutatingWebhookConfig.Webhooks { + clientConfigRootURL := fmt.Sprintf("https://%s.%s.svc:443", serviceName, namespace) + if !strings.HasPrefix(*webhook.ClientConfig.URL, clientConfigRootURL) { + t.Errorf("expected webhook client config url '%s' to start with '%s'", *webhook.ClientConfig.URL, clientConfigRootURL) + } + + if string(webhook.ClientConfig.CABundle) != caTestData { + t.Fatalf("expected webhook client config caBundle %s, but got %s", caTestData, string(webhook.ClientConfig.CABundle)) + } + } +} + +func TestValidatingWebhookConfiguration(t *testing.T) { + name, namespace := "karmada-demo", "test" + + // Base64 encoding of the dummy certificate data. + caTestData := "test-ca-data" + caBundle := base64.StdEncoding.EncodeToString([]byte(caTestData)) + + fakeClient := fakeclientset.NewSimpleClientset() + err := validatingWebhookConfiguration(fakeClient, namespace, name, caBundle) + if err != nil { + t.Fatalf("error creating the mutating webhook configuration: %v", err) + } + + // Ensure the expected action (validating webhook configuration creation) occurred on the fake clientset. + actions := fakeClient.Actions() + if len(actions) != 1 { + t.Fatalf("expected 1 action, but got %d", len(actions)) + } + + // Validate the action is a CreateAction and it's for the correct resource (ValidatingWebhookConfiguration). + createAction, ok := actions[0].(coretesting.CreateAction) + if !ok { + t.Fatalf("expected CreateAction, but got %T", actions[0]) + } + + // Validate the created ValidatingWebhookConfiguration object. + mutatingWebhookConfig := createAction.GetObject().(*admissionregistrationv1.ValidatingWebhookConfiguration) + serviceName := util.KarmadaWebhookName(name) + for _, webhook := range mutatingWebhookConfig.Webhooks { + clientConfigRootURL := fmt.Sprintf("https://%s.%s.svc:443", serviceName, namespace) + if !strings.HasPrefix(*webhook.ClientConfig.URL, clientConfigRootURL) { + t.Errorf("expected webhook client config url '%s' to start with '%s'", *webhook.ClientConfig.URL, clientConfigRootURL) + } + + if string(webhook.ClientConfig.CABundle) != caTestData { + t.Fatalf("expected webhook client config caBundle %s, but got %s", caTestData, string(webhook.ClientConfig.CABundle)) + } + } +} diff --git a/operator/pkg/scheme/scheme_test.go b/operator/pkg/scheme/scheme_test.go new file mode 100644 index 000000000000..c1982babb72e --- /dev/null +++ b/operator/pkg/scheme/scheme_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "fmt" + "testing" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" +) + +func TestSchemeInitialization(t *testing.T) { + // Ensure that the Kubernetes core scheme (for example, Pod) is added. + coreGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} + if !isGVKRegistered(Scheme, coreGVK) { + t.Errorf("K8s core scheme should be registered for GVK: %v", coreGVK) + } + + // Ensure that the Karmada operator v1alpha1 scheme (for example, Karmada) is added. + karmadaGVK := schema.GroupVersionKind{ + Group: operatorv1alpha1.GroupVersion.Group, + Version: operatorv1alpha1.GroupVersion.Version, + Kind: "Karmada", + } + if !isGVKRegistered(Scheme, karmadaGVK) { + t.Errorf("Karmada v1alpha1 scheme should be registered for GVK: %v", karmadaGVK) + } +} + +// isGVKRegistered verifies if the scheme contains a specific GVK. +func isGVKRegistered(s *runtime.Scheme, gvk schema.GroupVersionKind) bool { + _, err := s.New(gvk) + if err != nil { + fmt.Printf("Failed to find GVK: %v, Error: %v\n", gvk, err) + } + return err == nil +} diff --git a/operator/pkg/tasks/deinit/cert_test.go b/operator/pkg/tasks/deinit/cert_test.go new file mode 100644 index 000000000000..d8e0953fc589 --- /dev/null +++ b/operator/pkg/tasks/deinit/cert_test.go @@ -0,0 +1,191 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tasks + +import ( + "context" + "fmt" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + + "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/util" + "github.com/karmada-io/karmada/operator/pkg/workflow" +) + +func TestNewCleanupCertTask(t *testing.T) { + tests := []struct { + name string + wantTask workflow.Task + }{ + { + name: "NewCleanupCertTask_IsCalled_ExpectedWorkflowTask", + wantTask: workflow.Task{ + Name: "cleanup-cert", + Run: runCleanupCert, + RunSubTasks: true, + Tasks: []workflow.Task{ + newCleanupCertSubTask("karmada", util.KarmadaCertSecretName), + newCleanupCertSubTask("etcd", util.EtcdCertSecretName), + newCleanupCertSubTask("webhook", util.WebhookCertSecretName), + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cleanupCertTask := NewCleanupCertTask() + if err := util.DeepEqualTasks(cleanupCertTask, test.wantTask); err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} + +func TestRunCleanupCert(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + wantErr bool + errMsg string + }{ + { + name: "RunCleanupCert_InvalidTypeAssertion_TypeAssertionIsInvalid", + runData: &MyTestData{Data: "test"}, + wantErr: true, + errMsg: "cleanup-cert task invoked with an invalid data struct", + }, + { + name: "RunCleanupCert_ValidTypeAssertion_TypeAssertionIsValid", + runData: &TestDeInitData{ + name: "karmada-demo", + namespace: "test", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runCleanupCert(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestRunCleanupCertSubTask(t *testing.T) { + tests := []struct { + name string + owner string + secretNameFunc util.Namefunc + runData workflow.RunData + prep func(workflow.RunData) error + verify func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunCleanupCertSubTask_InvalidTypeAssertion_TypeAssertionIsInvalid", + owner: "karmada", + runData: &MyTestData{Data: "test"}, + prep: func(workflow.RunData) error { return nil }, + verify: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: fmt.Sprintf("cleanup-%s-cert task invoked with an invalid data struct", "karmada"), + }, + { + name: "RunCleanupCertSubTask_WithKarmadaCertSecret_CertsHaveBeenCleanedUp", + owner: "karmada", + secretNameFunc: util.KarmadaCertSecretName, + prep: prepareKarmadaCertSecret, + verify: verifyKarmadaCertSecretDeleted, + runData: &TestDeInitData{ + name: "karmada-demo", + namespace: "test", + remoteClient: fakeclientset.NewSimpleClientset(), + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.runData); err != nil { + t.Errorf("failed to prep the run cleanup cert subtask: %v", err) + } + cleanupCertSubTask := runCleanupCertSubTask(test.owner, test.secretNameFunc) + err := cleanupCertSubTask(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && 
!test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + if err := test.verify(test.runData); err != nil { + t.Errorf("failed to verify run cleanup cert subtask: %v", err) + } + }) + } +} + +func prepareKarmadaCertSecret(rd workflow.RunData) error { + data := rd.(*TestDeInitData) + secretName := util.KarmadaCertSecretName(data.name) + _, err := data.remoteClient.CoreV1().Secrets(data.namespace).Create(context.TODO(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: data.namespace, + Labels: constants.KarmadaOperatorLabel, + }, + Type: corev1.SecretTypeOpaque, + }, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create secret: %v", err) + } + return nil +} + +func verifyKarmadaCertSecretDeleted(rd workflow.RunData) error { + data := rd.(*TestDeInitData) + secretName := util.KarmadaCertSecretName(data.name) + secret, err := data.remoteClient.CoreV1().Secrets(data.namespace).Get(context.TODO(), secretName, metav1.GetOptions{}) + if err == nil { + if val, exists := secret.Labels[constants.KarmadaOperatorLabelKeyName]; exists && val != "" { + return fmt.Errorf("expected secret %s to be deleted, but it still exists with label %s=%s", secretName, constants.KarmadaOperatorLabelKeyName, val) + } + return fmt.Errorf("expected secret %s to be deleted, but it still exists", secretName) + } + if apierrors.IsNotFound(err) { + return nil + } + return err +} diff --git a/operator/pkg/tasks/deinit/test_helpers.go b/operator/pkg/tasks/deinit/test_helpers.go new file mode 100644 index 000000000000..1fe8dd754660 --- /dev/null +++ b/operator/pkg/tasks/deinit/test_helpers.go @@ -0,0 +1,62 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tasks + +import ( + clientset "k8s.io/client-go/kubernetes" +) + +// TestInterface defines the interface for retrieving test data. +type TestInterface interface { + // Get returns the data from the test instance. + Get() string +} + +// MyTestData is a struct that implements the TestInterface. +type MyTestData struct { + Data string +} + +// Get returns the data stored in the MyTestData struct. +func (m *MyTestData) Get() string { + return m.Data +} + +// TestDeInitData contains the configuration and state required to deinitialize Karmada components. +type TestDeInitData struct { + name string + namespace string + remoteClient clientset.Interface +} + +// Ensure TestDeInitData implements InitData interface at compile time. +var _ DeInitData = &TestDeInitData{} + +// GetName returns the name of the current Karmada installation. +func (t *TestDeInitData) GetName() string { + return t.name +} + +// GetNamespace returns the namespace of the current Karmada installation. +func (t *TestDeInitData) GetNamespace() string { + return t.namespace +} + +// RemoteClient returns the Kubernetes client for remote interactions. 
+func (t *TestDeInitData) RemoteClient() clientset.Interface { + return t.remoteClient +} diff --git a/operator/pkg/tasks/init/apiserver_test.go b/operator/pkg/tasks/init/apiserver_test.go new file mode 100644 index 000000000000..3d71ee64ebb3 --- /dev/null +++ b/operator/pkg/tasks/init/apiserver_test.go @@ -0,0 +1,449 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tasks + +import ( + "fmt" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + "k8s.io/utils/ptr" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/util" + "github.com/karmada-io/karmada/operator/pkg/util/apiclient" + "github.com/karmada-io/karmada/operator/pkg/workflow" +) + +func TestNewAPIServerTask(t *testing.T) { + tests := []struct { + name string + wantTask workflow.Task + }{ + { + name: "NewKarmadaApiserverTask_IsCalled_ExpectedWorkflowTask", + wantTask: workflow.Task{ + Name: "apiserver", + Run: runApiserver, + RunSubTasks: true, + Tasks: []workflow.Task{ + { + Name: constants.KarmadaAPIserverComponent, + Run: runKarmadaAPIServer, + }, + { + Name: fmt.Sprintf("%s-%s", "wait", constants.KarmadaAPIserverComponent), + Run: runWaitKarmadaAPIServer, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + karmadaAPIServerTask := NewKarmadaApiserverTask() + err := util.DeepEqualTasks(karmadaAPIServerTask, test.wantTask) + if err != nil { + t.Errorf("unexpected error, got %v", err) + } + }) + } +} + +func TestNewKarmadaAggregatedApiserverTask(t *testing.T) { + tests := []struct { + name string + wantTask workflow.Task + }{ + { + name: "NewKarmadaAggregatedApiserverTask_IsCalled_ExpectedWorkflowTask", + wantTask: workflow.Task{ + Name: "aggregated-apiserver", + Run: runAggregatedApiserver, + RunSubTasks: true, + Tasks: []workflow.Task{ + { + Name: constants.KarmadaAggregatedAPIServerComponent, + Run: runKarmadaAggregatedAPIServer, + }, + { + Name: fmt.Sprintf("%s-%s", "wait", constants.KarmadaAggregatedAPIServerComponent), + Run: runWaitKarmadaAggregatedAPIServer, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + karmadaAggregatedAPIServerTask := NewKarmadaAggregatedApiserverTask() + err := util.DeepEqualTasks(karmadaAggregatedAPIServerTask, test.wantTask) + if err != nil { + t.Errorf("unexpected error, got %v", err) + } + }) + } +} + +func TestRunAggregatedAPIServer(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + wantErr bool + errMsg string + }{ + { + name: "RunAggregatedApiserver_InvalidTypeAssertion_TypeAssertionFailed", + runData: MyTestData{Data: "test"}, + wantErr: true, + errMsg: "aggregated-apiserver task invoked with an invalid data struct", + }, + { + name: "RunAggregatedApiserver_ValidTypeAssertion_TypeAssertionSuceeded", + 
runData: &TestInitData{}, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runAggregatedApiserver(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestRunAPIServer(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + wantErr bool + errMsg string + }{ + { + name: "RunAPIServer_InvalidTypeAssertion_TypeAssertionFailed", + runData: MyTestData{Data: "test"}, + wantErr: true, + errMsg: "apiserver task invoked with an invalid data struct", + }, + { + name: "RunAPIServer_ValidTypeAssertion_TypeAssertionSuceeded", + runData: &TestInitData{}, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runApiserver(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestRunKarmadaAPIServer(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + wantErr bool + errMsg string + }{ + { + name: "RunKarmadaAPIServer_InvalidTypeAssertion_TypeAssertionFailed", + runData: &MyTestData{Data: "test"}, + wantErr: true, + errMsg: "KarmadaApiserver task invoked with an invalid data struct", + }, + { + name: "RunKarmadaAPIServer_NilKarmadaAPIServer_RunIsCompletedWithoutErrors", + runData: &TestInitData{ + ComponentsUnits: &operatorv1alpha1.KarmadaComponents{}, + }, + wantErr: false, + }, + { + name: "RunKarmadaAPIServer_InitializeKarmadaAPIServer_KarmadaAPIServerEnsured", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + ComponentsUnits: &operatorv1alpha1.KarmadaComponents{ + KarmadaAPIServer: &operatorv1alpha1.KarmadaAPIServer{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ImageTag: "karmada-apiserver-image"}, + Replicas: ptr.To[int32](2), + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: corev1.PullIfNotPresent, + }, + ServiceSubnet: ptr.To("10.96.0.0/12"), + }, + }, + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + FeatureGatesOptions: map[string]bool{ + "Feature1": true, + }, + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runKarmadaAPIServer(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestRunWaitKarmadaAPIServer(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + prep func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunWaitKarmadaAPIServer_InvalidTypeAssertion_TypeAssertionFailed", + runData: &MyTestData{Data: "test"}, + prep: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: "wait-KarmadaAPIServer task invoked with an 
invalid data struct", + }, + { + name: "RunWaitKarmadaAPIServer_TimeoutWaitingForSomeKarmadaAPIServersPods_Timeout", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + ControlplaneConfigREST: &rest.Config{}, + FeatureGatesOptions: map[string]bool{ + "Feature1": true, + }, + }, + prep: func(workflow.RunData) error { + componentBeReadyTimeout = time.Second + return nil + }, + wantErr: true, + errMsg: "waiting for karmada-apiserver to ready timeout", + }, + { + name: "RunWaitKarmadaAPIServer_WaitingForSomeKarmadaAPIServersPods_KarmadaAPIServerIsReady", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + ControlplaneConfigREST: &rest.Config{}, + FeatureGatesOptions: map[string]bool{ + "Feature1": true, + }, + }, + prep: func(rd workflow.RunData) error { + data := rd.(*TestInitData) + _, err := apiclient.CreatePods(data.RemoteClient(), data.Namespace, util.KarmadaAPIServerName(data.Name), 2, karmadaApiserverLabels, true) + return err + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.runData); err != nil { + t.Errorf("failed to prep waiting for Karmada APIServer: %v", err) + } + err := runWaitKarmadaAPIServer(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestRunKarmadaAggregatedAPIServer(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + prep func() error + wantErr bool + errMsg string + }{ + { + name: "RunKarmadaAggregatedAPIServer_InvalidTypeAssertion_TypeAssertionFailed", + runData: &MyTestData{Data: "test"}, + prep: func() error { return nil }, + wantErr: true, + errMsg: "KarmadaAggregatedAPIServer task invoked with an invalid data struct", + }, + { + name: "RunKarmadaAggregatedAPIServer_NilKarmadaAggregatedAPIServer_RunIsCompletedWithoutErrors", + runData: &TestInitData{ + ComponentsUnits: &operatorv1alpha1.KarmadaComponents{}, + }, + prep: func() error { return nil }, + wantErr: false, + }, + { + name: "RunKarmadaAggregatedAPIServer_InitializeKarmadaAggregatedAPIServer_KarmadaAggregatedAPIServerEnsured", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + ComponentsUnits: &operatorv1alpha1.KarmadaComponents{ + KarmadaAggregatedAPIServer: &operatorv1alpha1.KarmadaAggregatedAPIServer{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ImageTag: "karmada-aggregated-apiserver-image"}, + Replicas: ptr.To[int32](2), + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: corev1.PullIfNotPresent, + }, + }, + }, + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + FeatureGatesOptions: map[string]bool{ + "Feature1": true, + }, + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runKarmadaAggregatedAPIServer(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to 
contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestRunWaitKarmadaAggregatedAPIServer(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + prep func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunWaitKarmadaAggregatedAPIServer_InvalidTypeAssertion_TypeAssertionFailed", + runData: &MyTestData{Data: "test"}, + prep: func(workflow.RunData) error { + return nil + }, + wantErr: true, + errMsg: "wait-KarmadaAggregatedAPIServer task invoked with an invalid data struct", + }, + { + name: "RunWaitKarmadaAggregatedAPIServer_TimeoutWaitingForSomeKarmadaAggregatedAPIServersPods_Timeout", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + ControlplaneConfigREST: &rest.Config{}, + FeatureGatesOptions: map[string]bool{ + "Feature1": true, + }, + }, + prep: func(workflow.RunData) error { + componentBeReadyTimeout = time.Second + return nil + }, + wantErr: true, + errMsg: "waiting for karmada-aggregated-apiserver to ready timeout", + }, + { + name: "RunWaitKarmadaAggregatedAPIServer_WaitingForSomeKarmadaAggregatedAPIServersPods_KarmadaAggregatedAPIServerIsReady", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + ControlplaneConfigREST: &rest.Config{}, + FeatureGatesOptions: map[string]bool{ + "Feature1": true, + }, + }, + prep: func(rd workflow.RunData) error { + data := rd.(*TestInitData) + _, err := apiclient.CreatePods(data.RemoteClient(), data.Namespace, util.KarmadaAggregatedAPIServerName(data.Name), 2, karmadaAggregatedAPIServerLabels, true) + return err + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.runData); err != nil { + t.Errorf("failed to prep waiting for Karmada Aggregated APIServer: %v", err) + } + err := runWaitKarmadaAggregatedAPIServer(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} diff --git a/operator/pkg/tasks/init/test_helpers.go b/operator/pkg/tasks/init/test_helpers.go new file mode 100644 index 000000000000..e480b5ff8584 --- /dev/null +++ b/operator/pkg/tasks/init/test_helpers.go @@ -0,0 +1,157 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tasks + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/certs" +) + +// TestInterface defines the interface for retrieving test data. 
+type TestInterface interface { + // Get returns the data from the test instance. + Get() string +} + +// MyTestData is a struct that implements the TestInterface. +type MyTestData struct { + Data string +} + +// Get returns the data stored in the MyTestData struct. +func (m *MyTestData) Get() string { + return m.Data +} + +// TestInitData contains the configuration and state required to initialize Karmada components. +type TestInitData struct { + Name string + Namespace string + ControlplaneConfigREST *rest.Config + DataDirectory string + CrdTarballArchive operatorv1alpha1.CRDTarball + KarmadaVersionRelease string + ComponentsUnits *operatorv1alpha1.KarmadaComponents + FeatureGatesOptions map[string]bool + RemoteClientConnector clientset.Interface + KarmadaClientConnector clientset.Interface + ControlplaneAddr string + Certs []*certs.KarmadaCert +} + +// Ensure TestInitData implements InitData interface at compile time. +var _ InitData = &TestInitData{} + +// GetName returns the name of the current Karmada installation. +func (t *TestInitData) GetName() string { + return t.Name +} + +// GetNamespace returns the namespace of the current Karmada installation. +func (t *TestInitData) GetNamespace() string { + return t.Namespace +} + +// SetControlplaneConfig sets the control plane configuration for Karmada. +func (t *TestInitData) SetControlplaneConfig(config *rest.Config) { + t.ControlplaneConfigREST = config +} + +// ControlplaneConfig returns the control plane configuration. +func (t *TestInitData) ControlplaneConfig() *rest.Config { + return t.ControlplaneConfigREST +} + +// ControlplaneAddress returns the address of the control plane. +func (t *TestInitData) ControlplaneAddress() string { + return t.ControlplaneAddr +} + +// RemoteClient returns the Kubernetes client for remote interactions. +func (t *TestInitData) RemoteClient() clientset.Interface { + return t.RemoteClientConnector +} + +// KarmadaClient returns the Kubernetes client for interacting with Karmada. +func (t *TestInitData) KarmadaClient() clientset.Interface { + return t.KarmadaClientConnector +} + +// DataDir returns the data directory used by Karmada. +func (t *TestInitData) DataDir() string { + return t.DataDirectory +} + +// CrdTarball returns the CRD tarball used for Karmada installation. +func (t *TestInitData) CrdTarball() operatorv1alpha1.CRDTarball { + return t.CrdTarballArchive +} + +// KarmadaVersion returns the version of Karmada being used. +func (t *TestInitData) KarmadaVersion() string { + return t.KarmadaVersionRelease +} + +// Components returns the Karmada components used in the current installation. +func (t *TestInitData) Components() *operatorv1alpha1.KarmadaComponents { + return t.ComponentsUnits +} + +// FeatureGates returns the feature gates enabled for the current installation. +func (t *TestInitData) FeatureGates() map[string]bool { + return t.FeatureGatesOptions +} + +// AddCert adds a Karmada certificate to the TestInitData. +func (t *TestInitData) AddCert(cert *certs.KarmadaCert) { + t.Certs = append(t.Certs, cert) +} + +// GetCert retrieves a Karmada certificate by its name. +func (t *TestInitData) GetCert(name string) *certs.KarmadaCert { + for _, cert := range t.Certs { + parts := strings.Split(cert.CertName(), ".") + if parts[0] == name { + return cert + } + } + return nil +} + +// CertList returns a list of all Karmada certificates stored in TestInitData. 
+func (t *TestInitData) CertList() []*certs.KarmadaCert { + return t.Certs +} + +// LoadCertFromSecret loads a Karmada certificate from a Kubernetes secret. +func (t *TestInitData) LoadCertFromSecret(secret *corev1.Secret) error { + if len(secret.Data) == 0 { + return fmt.Errorf("cert data is empty") + } + + // Dummy implementation: load empty certificate. + cert := &certs.KarmadaCert{} + t.AddCert(cert) + return nil +} diff --git a/operator/pkg/util/apiclient/idempotency.go b/operator/pkg/util/apiclient/idempotency.go index 81f28a6ca697..f95e9a191d35 100644 --- a/operator/pkg/util/apiclient/idempotency.go +++ b/operator/pkg/util/apiclient/idempotency.go @@ -281,6 +281,32 @@ func CreateOrUpdateClusterRole(client clientset.Interface, clusterrole *rbacv1.C return nil } +// CreateOrUpdateClusterRoleBinding creates a Clusterrolebinding if the target resource doesn't exist. +// If the resource exists already, this function will update the resource instead. +func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterrolebinding *rbacv1.ClusterRoleBinding) error { + _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterrolebinding, metav1.CreateOptions{}) + + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return err + } + + older, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), clusterrolebinding.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + + clusterrolebinding.ResourceVersion = older.ResourceVersion + _, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterrolebinding, metav1.UpdateOptions{}) + if err != nil { + return err + } + } + + klog.V(4).InfoS("Successfully created or updated clusterrolebinding", "clusterrolebinding", clusterrolebinding.GetName()) + return nil +} + // DeleteDeploymentIfHasLabels deletes a Deployment that exists the given labels. func DeleteDeploymentIfHasLabels(client clientset.Interface, name, namespace string, ls labels.Set) error { deployment, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) diff --git a/operator/pkg/util/apiclient/test_helpers.go b/operator/pkg/util/apiclient/test_helpers.go new file mode 100644 index 000000000000..efa566ca20e4 --- /dev/null +++ b/operator/pkg/util/apiclient/test_helpers.go @@ -0,0 +1,125 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiclient + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +// MockK8SRESTClient is a struct that implements clientset.Interface. +type MockK8SRESTClient struct { + clientset.Interface + RESTClientConnector rest.Interface +} + +// Discovery returns a mocked discovery interface. 
+func (m *MockK8SRESTClient) Discovery() discovery.DiscoveryInterface { + return &MockDiscovery{ + RESTClientConnector: m.RESTClientConnector, + } +} + +// MockDiscovery is a mock implementation of DiscoveryInterface. +type MockDiscovery struct { + discovery.DiscoveryInterface + RESTClientConnector rest.Interface +} + +// RESTClient returns a restClientConnector that is used to communicate with API server +// by this client implementation. +func (m *MockDiscovery) RESTClient() rest.Interface { + return m.RESTClientConnector +} + +// CreatePods creates a specified number of pods in the given namespace +// with the provided component name and optional labels. It uses a +// Kubernetes client to interact with the API and can mark the pods as +// running if the `markRunningState` flag is set. +// +// Parameters: +// - client: Kubernetes client interface for API requests. +// - namespace: Namespace for pod creation. +// - componentName: Base name for the pods and their containers. +// - replicaCount: Number of pods to create. +// - labels: Labels to apply to the pods. +// - markRunningState: If true, updates the pods' status to running. +// +// Returns: +// - A slice of pointers to corev1.Pod representing the created pods. +// - An error if pod creation fails. +func CreatePods(client clientset.Interface, namespace string, componentName string, replicaCount int32, labels map[string]string, markRunningState bool) ([]*corev1.Pod, error) { + pods := make([]*corev1.Pod, 0, replicaCount) + for i := int32(0); i < replicaCount; i++ { + podName := fmt.Sprintf("%s-pod-%d", componentName, i) + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: namespace, + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: fmt.Sprintf("my-%s-container-%d", componentName, i), + Image: fmt.Sprintf("my-%s-image:latest", componentName), + Ports: []corev1.ContainerPort{{ContainerPort: 80}}, + }}, + }, + } + _, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to create pod %s: %w", podName, err) + } + + // Mark the pod as in running state if flag is set. + if markRunningState { + if err := UpdatePodStatus(client, pod); err != nil { + return nil, fmt.Errorf("failed to update pod status, got err: %v", err) + } + } + pods = append(pods, pod) + } + return pods, nil +} + +// UpdatePodStatus updates the status of a pod to PodRunning and sets the PodReady condition. +func UpdatePodStatus(client clientset.Interface, pod *corev1.Pod) error { + // Mark the pod as in running state. + pod.Status = corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + } + + // Update the pod status in the Kubernetes cluster. + _, err := client.CoreV1().Pods(pod.GetNamespace()).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update status of the pod %s: %w", pod.GetName(), err) + } + + return nil +} diff --git a/operator/pkg/util/endpoint_test.go b/operator/pkg/util/endpoint_test.go new file mode 100644 index 000000000000..0a12c24a1a08 --- /dev/null +++ b/operator/pkg/util/endpoint_test.go @@ -0,0 +1,182 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "context" + "fmt" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" +) + +func TestGetControlPlaneEndpoint(t *testing.T) { + tests := []struct { + name string + address string + port string + wantErr bool + wantEndpoint string + }{ + { + name: "GetControlplaneEndpoint_InvalidAddress_AddressIsInvalid", + address: "192.168:1:1", + port: "6060", + wantErr: true, + }, + { + name: "GetControlplaneEndpoint_ParseEndpoint_EndpointParsed", + address: "192.168.1.1", + port: "6060", + wantErr: false, + wantEndpoint: "https://192.168.1.1:6060", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + controlPlaneEndpoint, err := GetControlplaneEndpoint(test.address, test.port) + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if controlPlaneEndpoint != test.wantEndpoint { + t.Errorf("expected endpoint %s, but got %s", test.wantEndpoint, controlPlaneEndpoint) + } + }) + } +} + +func TestGetAPIServiceIP(t *testing.T) { + tests := []struct { + name string + client clientset.Interface + prep func(clientset.Interface) error + wantAPIServiceIP string + wantErr bool + errMsg string + }{ + { + name: "GetAPIServiceIP_WithNoNodesInTheCluster_FailedToGetAPIServiceIP", + client: fakeclientset.NewSimpleClientset(), + prep: func(clientset.Interface) error { return nil }, + wantErr: true, + errMsg: "there are no nodes in cluster", + }, + { + name: "GetAPIServiceIP_WithoutMasterNode_WorkerNodeAPIServiceIPReturned", + client: fakeclientset.NewSimpleClientset(), + prep: func(client clientset.Interface) error { + nodes := []*corev1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.2", + }, + }, + }, + }, + } + for _, node := range nodes { + _, err := client.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create node %s: %v", node.Name, err) + } + } + return nil + }, + wantErr: false, + wantAPIServiceIP: "192.168.1.2", + }, + { + name: "GetAPIServiceIP_WithMasterNode_MasterNodeAPIServiceIPReturned", + client: fakeclientset.NewSimpleClientset(), + prep: func(client clientset.Interface) error { + nodes := []*corev1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + Labels: map[string]string{ + "node-role.kubernetes.io/master": "", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.1", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-2", + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.2", + }, + }, + }, + }, + } + for _, node := range nodes { + _, err := client.CoreV1().Nodes().Create(context.TODO(), node, 
metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create node %s: %v", node.Name, err) + } + } + return nil + }, + wantErr: false, + wantAPIServiceIP: "192.168.1.1", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.client); err != nil { + t.Errorf("failed to prep before getting API service IP: %v", err) + } + apiServiceIP, err := GetAPIServiceIP(test.client) + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + if apiServiceIP != test.wantAPIServiceIP { + t.Errorf("expected API service IP %s, but got %s", test.wantAPIServiceIP, apiServiceIP) + } + }) + } +} diff --git a/operator/pkg/util/kubeconfig.go b/operator/pkg/util/kubeconfig.go index 6fb65b7adade..792fe89c3bf8 100644 --- a/operator/pkg/util/kubeconfig.go +++ b/operator/pkg/util/kubeconfig.go @@ -61,13 +61,18 @@ func CreateBasic(serverURL, clusterName, userName string, caCert []byte) *client } } -// IsInCluster returns a bool represents whether the remote cluster is the local or not. +// IsInCluster checks if the specified host cluster is the local cluster. +// It returns true if: +// - the hostCluster is nil; +// - or its SecretRef is nil; +// - or the SecretRef's Name is an empty string. +// This indicates that the remote cluster is either not configured or not identifiable as the local cluster. func IsInCluster(hostCluster *operatorv1alpha1.HostCluster) bool { return hostCluster == nil || hostCluster.SecretRef == nil || len(hostCluster.SecretRef.Name) == 0 } // BuildClientFromSecretRef builds a clientset from the secret reference. -func BuildClientFromSecretRef(client *clientset.Clientset, ref *operatorv1alpha1.LocalSecretReference) (*clientset.Clientset, error) { +func BuildClientFromSecretRef(client clientset.Interface, ref *operatorv1alpha1.LocalSecretReference) (clientset.Interface, error) { secret, err := client.CoreV1().Secrets(ref.Namespace).Get(context.TODO(), ref.Name, metav1.GetOptions{}) if err != nil { return nil, err @@ -81,7 +86,7 @@ func BuildClientFromSecretRef(client *clientset.Clientset, ref *operatorv1alpha1 return newClientSetForConfig(kubeconfigBytes) } -func newClientSetForConfig(kubeconfig []byte) (*clientset.Clientset, error) { +func newClientSetForConfig(kubeconfig []byte) (clientset.Interface, error) { clientConfig, err := clientcmd.NewClientConfigFromBytes(kubeconfig) if err != nil { return nil, err diff --git a/operator/pkg/util/kubeconfig_test.go b/operator/pkg/util/kubeconfig_test.go new file mode 100644 index 000000000000..d2f914c6ac9f --- /dev/null +++ b/operator/pkg/util/kubeconfig_test.go @@ -0,0 +1,252 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "math/big" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" +) + +func TestBuildClientFromSecretRef(t *testing.T) { + name, namespace := "test-secret", "test" + token := "my-sample-token" + kubeconfig := ` +apiVersion: v1 +clusters: +- cluster: + server: https://127.0.0.1:6443 + certificate-authority-data: %s + name: test-cluster +contexts: +- context: + cluster: test-cluster + user: test-user + name: test-context +current-context: test-context +kind: Config +preferences: {} +users: +- name: test-user + user: + token: %s +` + tests := []struct { + name string + client clientset.Interface + ref *operatorv1alpha1.LocalSecretReference + wantErr bool + prep func(clientset.Interface) error + errMsg string + }{ + { + name: "BuildClientFromSecretRef_GotNetworkIssue_FailedToBuildClient", + client: fakeclientset.NewSimpleClientset(), + ref: &operatorv1alpha1.LocalSecretReference{ + Name: name, + Namespace: namespace, + }, + prep: func(client clientset.Interface) error { + client.(*fakeclientset.Clientset).Fake.PrependReactor("get", "secrets", func(coretesting.Action) (bool, runtime.Object, error) { + return true, nil, errors.New("unexpected error: encountered a network issue while getting the secrets") + }) + return nil + }, + wantErr: true, + errMsg: "unexpected error: encountered a network issue while getting the secrets", + }, + { + name: "BuildClientFromSecretRef_WithoutKubeConfig_KubeConfigIsNotFound", + client: fakeclientset.NewSimpleClientset(), + ref: &operatorv1alpha1.LocalSecretReference{ + Name: name, + Namespace: namespace, + }, + prep: func(client clientset.Interface) error { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{}, + } + _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create secret %s in %s namespace, got err: %v", name, namespace, err) + } + return nil + }, + wantErr: true, + errMsg: "the kubeconfig or data key 'kubeconfig' is not found", + }, + { + name: "BuildClientFromSecretRef_WithKubeconfig_ClientIsBuilt", + client: fakeclientset.NewSimpleClientset(), + ref: &operatorv1alpha1.LocalSecretReference{ + Name: name, + Namespace: namespace, + }, + prep: func(client clientset.Interface) error { + // Generate kubeconfig bytes. + caCert, err := generateTestCACertificate() + if err != nil { + return fmt.Errorf("failed to generate CA certificate: %v", err) + } + base64CACert := base64.StdEncoding.EncodeToString([]byte(caCert)) + base64Token := base64.StdEncoding.EncodeToString([]byte(token)) + kubeconfigBytes := []byte(fmt.Sprintf(kubeconfig, base64CACert, base64Token)) + + // Create secret with kubeconfig data. 
+ secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{"kubeconfig": kubeconfigBytes}, + } + _, err = client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create secret %s in %s namespace, got err: %v", name, namespace, err) + } + return nil + }, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.client); err != nil { + t.Errorf("failed to prep before building client from secret ref: %v", err) + } + _, err := BuildClientFromSecretRef(test.client, test.ref) + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expceted error message %s to be in %s", test.errMsg, err.Error()) + } + }) + } +} + +func TestIsInCluster(t *testing.T) { + tests := []struct { + name string + hostCluster *operatorv1alpha1.HostCluster + want bool + }{ + { + name: "IsInCluster_WithoutHostCluster_ItIsLocal", + hostCluster: nil, + want: true, + }, + { + name: "IsInCluster_WithoutSecretRef_ItIsLocal", + hostCluster: &operatorv1alpha1.HostCluster{ + SecretRef: nil, + }, + want: true, + }, + { + name: "IsInCluster_WithoutSecretRefName_ItIsLocal", + hostCluster: &operatorv1alpha1.HostCluster{ + SecretRef: &operatorv1alpha1.LocalSecretReference{ + Name: "", + }, + }, + want: true, + }, + { + name: "IsInCluster_WithAllRemoteClusterConfigurations_ItIsRemote", + hostCluster: &operatorv1alpha1.HostCluster{ + SecretRef: &operatorv1alpha1.LocalSecretReference{ + Name: "remote-secret", + Namespace: "test", + }, + }, + want: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if got := IsInCluster(test.hostCluster); got != test.want { + t.Errorf("expected host cluster local status to be %t, but got %t", test.want, got) + } + }) + } +} + +// generateTestCACertificate returns a self-signed CA certificate as a PEM string. +func generateTestCACertificate() (string, error) { + // Generate a new RSA private key + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return "", err + } + + // Set the certificate parameters. + notBefore := time.Now() + notAfter := notBefore.Add(365 * 24 * time.Hour) + + serialNumber, err := rand.Int(rand.Reader, big.NewInt(1<<62)) + if err != nil { + return "", err + } + + cert := &x509.Certificate{ + SerialNumber: serialNumber, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + IsCA: true, + } + + // Create the certificate. + certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, &priv.PublicKey, priv) + if err != nil { + return "", err + } + + // PEM encode the certificate. + certPEM := &pem.Block{Type: "CERTIFICATE", Bytes: certDER} + certPEMData := pem.EncodeToMemory(certPEM) + + return string(certPEMData), nil +} diff --git a/operator/pkg/util/naming_test.go b/operator/pkg/util/naming_test.go new file mode 100644 index 000000000000..42aafb4727a8 --- /dev/null +++ b/operator/pkg/util/naming_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2024 The Karmada Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import "testing" + +func TestGenerateResourceName(t *testing.T) { + tests := []struct { + name string + component string + suffix string + want string + }{ + { + name: "GenerateResourceName_WithKarmada_SuffixImmediatelyAfter", + component: "karmada-demo", + suffix: "search", + want: "karmada-demo-search", + }, + { + name: "GenerateResourceName_WithoutKarmada_KarmadaInTheMiddle", + component: "test-demo", + suffix: "search", + want: "test-demo-karmada-search", + }, + } + for _, test := range tests { + if got := generateResourceName(test.component, test.suffix); got != test.want { + t.Errorf("expected resource name generated to be %s, but got %s", test.want, got) + } + } +} diff --git a/operator/pkg/util/template_test.go b/operator/pkg/util/template_test.go new file mode 100644 index 000000000000..9cbf75d4d689 --- /dev/null +++ b/operator/pkg/util/template_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "bytes" + "strings" + "testing" +) + +func TestParseTemplate(t *testing.T) { + tests := []struct { + name string + template string + args interface{} + want []byte + wantErr bool + errMsg string + }{ + { + name: "ParseTemplate_WithMissingVariable_ParsingFailed", + template: "Hello, {{.Name}}!", + args: struct{ Missing string }{Missing: "World"}, + want: nil, + wantErr: true, + errMsg: "error when executing template", + }, + { + name: "ParseTemplate_WithInvalidTemplateSyntax_ParsingFailed", + template: "Hello, {{.Name!", + args: struct{ Name string }{Name: "World"}, + want: nil, + wantErr: true, + errMsg: "error when parsing template", + }, + { + name: "ParseTemplate_ValidTemplateWithVariable_ParsingSucceeded", + template: "Hello, {{.Name}}!", + args: struct{ Name string }{Name: "World"}, + want: []byte("Hello, World!"), + wantErr: false, + }, + { + name: "ParseTemplate_EmptyTemplate_ParsingSucceeded", + template: "", + args: nil, + want: []byte(""), + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, err := ParseTemplate(test.template, test.args) + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + if !bytes.Equal(got, test.want) { + t.Errorf("expected parsed template bytes to be %v, but got %v", test.want, got) + } + }) + } +} diff --git a/operator/pkg/util/util.go b/operator/pkg/util/util.go index 6214527211c5..99365b2d11e4 100644 --- a/operator/pkg/util/util.go +++ b/operator/pkg/util/util.go @@ -29,12 +29,28 @@ import ( "strings" "time" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/klog/v2" "sigs.k8s.io/yaml" + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/workflow" "github.com/karmada-io/karmada/pkg/util" ) +var ( + // ClientFactory creates a new Kubernetes clientset from the provided kubeconfig. + ClientFactory = func(kubeconfig *rest.Config) (clientset.Interface, error) { + return clientset.NewForConfig(kubeconfig) + } + + // BuildClientFromSecretRefFactory constructs a Kubernetes clientset using a LocalSecretReference. + BuildClientFromSecretRefFactory = func(client clientset.Interface, ref *operatorv1alpha1.LocalSecretReference) (clientset.Interface, error) { + return BuildClientFromSecretRef(client, ref) + } +) + // Downloader Download progress type Downloader struct { io.Reader @@ -222,3 +238,51 @@ func ReplaceYamlForReg(path, destResource string, reg *regexp.Regexp) ([]byte, e repl := reg.ReplaceAllString(string(data), destResource) return yaml.YAMLToJSON([]byte(repl)) } + +// ContainAllTasks checks if all tasks in the subset are present in the tasks slice. +// Returns an error if any subset task is not found; nil otherwise. +func ContainAllTasks(tasks, subset []workflow.Task) error { + for _, subsetTask := range subset { + found := false + for _, task := range tasks { + found = DeepEqualTasks(task, subsetTask) == nil + if found { + break + } + } + if !found { + return fmt.Errorf("subset task %v not found in tasks", subsetTask) + } + } + return nil +} + +// DeepEqualTasks checks if two workflow.Task instances are deeply equal. +// It returns an error if they differ, or nil if they are equal. 
+// The comparison includes the task name, RunSubTasks flag, +// and the length and contents of the Tasks slice. +// Function references and behavior are not compared; only the values +// of the specified fields are considered. Any differences are detailed +// in the returned error. +func DeepEqualTasks(t1, t2 workflow.Task) error { + if t1.Name != t2.Name { + return fmt.Errorf("expected t1 name %s, but got %s", t2.Name, t1.Name) + } + + if t1.RunSubTasks != t2.RunSubTasks { + return fmt.Errorf("expected t1 RunSubTasks flag %t, but got %t", t2.RunSubTasks, t1.RunSubTasks) + } + + if len(t1.Tasks) != len(t2.Tasks) { + return fmt.Errorf("expected t1 tasks length %d, but got %d", len(t2.Tasks), len(t1.Tasks)) + } + + for index := range t1.Tasks { + err := DeepEqualTasks(t1.Tasks[index], t2.Tasks[index]) + if err != nil { + return fmt.Errorf("unexpected error; tasks are not equal at index %d: %v", index, err) + } + } + + return nil +} diff --git a/pkg/apis/policy/v1alpha1/override_types.go b/pkg/apis/policy/v1alpha1/override_types.go index c57e20381662..6c8b3d47bd86 100644 --- a/pkg/apis/policy/v1alpha1/override_types.go +++ b/pkg/apis/policy/v1alpha1/override_types.go @@ -101,6 +101,7 @@ type RuleWithCluster struct { // - ArgsOverrider // - LabelsOverrider // - AnnotationsOverrider +// - FieldOverrider // - Plaintext type Overriders struct { // Plaintext represents override rules defined with plaintext overriders. @@ -126,6 +127,13 @@ type Overriders struct { // AnnotationsOverrider represents the rules dedicated to handling workload annotations // +optional AnnotationsOverrider []LabelAnnotationOverrider `json:"annotationsOverrider,omitempty"` + + // FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + // This allows changing a single field within the resource with multiple operations. + // It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + // The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + // +optional + FieldOverrider []FieldOverrider `json:"fieldOverrider,omitempty"` } // LabelAnnotationOverrider represents the rules dedicated to handling workload labels/annotations @@ -255,6 +263,65 @@ const ( OverriderOpReplace OverriderOperator = "replace" ) +// FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. +// This allows changing a single field within the resource with multiple operations. +// It is designed to handle structured field values such as those found in ConfigMaps or Secrets. +// The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. +// Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously. +type FieldOverrider struct { + // FieldPath specifies the initial location in the instance document where the operation should take place. + // The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml" + // specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml". + // +required + FieldPath string `json:"fieldPath"` + + // JSON represents the operations performed on the JSON document specified by the FieldPath. + // +optional + JSON []JSONPatchOperation `json:"json,omitempty"` + + // YAML represents the operations performed on the YAML document specified by the FieldPath. 
+ // +optional + YAML []YAMLPatchOperation `json:"yaml,omitempty"` +} + +// JSONPatchOperation represents a single field modification operation for JSON format. +type JSONPatchOperation struct { + // SubPath specifies the relative location within the initial FieldPath where the operation should take place. + // The path uses RFC 6901 for navigating into nested structures. + // +required + SubPath string `json:"subPath"` + + // Operator indicates the operation on target field. + // Available operators are: "add", "remove", and "replace". + // +kubebuilder:validation:Enum=add;remove;replace + // +required + Operator OverriderOperator `json:"operator"` + + // Value is the new value to set for the specified field if the operation is "add" or "replace". + // For "remove" operation, this field is ignored. + // +optional + Value apiextensionsv1.JSON `json:"value,omitempty"` +} + +// YAMLPatchOperation represents a single field modification operation for YAML format. +type YAMLPatchOperation struct { + // SubPath specifies the relative location within the initial FieldPath where the operation should take place. + // The path uses RFC 6901 for navigating into nested structures. + // +required + SubPath string `json:"subPath"` + + // Operator indicates the operation on target field. + // Available operators are: "add", "remove", and "replace". + // +kubebuilder:validation:Enum=add;remove;replace + // +required + Operator OverriderOperator `json:"operator"` + + // Value is the new value to set for the specified field if the operation is "add" or "replace". + // For "remove" operation, this field is ignored. + // +optional + Value apiextensionsv1.JSON `json:"value,omitempty"` +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // OverridePolicyList is a collection of OverridePolicy. diff --git a/pkg/apis/policy/v1alpha1/propagation_types.go b/pkg/apis/policy/v1alpha1/propagation_types.go index ee6a38a85867..60be208a4ffc 100644 --- a/pkg/apis/policy/v1alpha1/propagation_types.go +++ b/pkg/apis/policy/v1alpha1/propagation_types.go @@ -181,6 +181,25 @@ type PropagationSpec struct { // nil means no suspension. no default values. // +optional Suspension *Suspension `json:"suspension,omitempty"` + + // PreserveResourcesOnDeletion controls whether resources should be preserved on the + // member clusters when the resource template is deleted. + // If set to true, resources will be preserved on the member clusters. + // Default is false, which means resources will be deleted along with the resource template. + // + // This setting is particularly useful during workload migration scenarios to ensure + // that rollback can occur quickly without affecting the workloads running on the + // member clusters. + // + // Additionally, this setting applies uniformly across all member clusters and will not + // selectively control preservation on only some clusters. + // + // Note: This setting does not apply to the deletion of the policy itself. + // When the policy is deleted, the resource templates and their corresponding + // propagated resources in member clusters will remain unchanged unless explicitly deleted. + // + // +optional + PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"` } // ResourceSelector the resources will be selected. 
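For illustration, an OverridePolicy exercising the new fieldOverrider could look like the sketch below. The fieldOverrider, fieldPath, yaml, subPath, operator, and value keys come directly from the API definitions above; the policy name, the ConfigMap, and the overrideRules/targetCluster layout are assumptions for the example and are not part of this patch.

apiVersion: policy.karmada.io/v1alpha1
kind: OverridePolicy
metadata:
  name: db-config-override            # hypothetical policy name
  namespace: default
spec:
  resourceSelectors:
    - apiVersion: v1
      kind: ConfigMap
      name: db-config                 # hypothetical ConfigMap
  overrideRules:
    - targetCluster:
        clusterNames:
          - member1
      overriders:
        fieldOverrider:
          - fieldPath: /data/db-config.yaml      # RFC 6901 path to the ConfigMap data key
            yaml:
              - subPath: /settings/replicas      # RFC 6901 path inside that YAML document
                operator: replace
                value: 3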
diff --git a/pkg/apis/policy/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/policy/v1alpha1/zz_generated.deepcopy.go index 764fa323ef39..c1683072ed54 100644 --- a/pkg/apis/policy/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/policy/v1alpha1/zz_generated.deepcopy.go @@ -453,6 +453,36 @@ func (in *FederatedResourceQuotaStatus) DeepCopy() *FederatedResourceQuotaStatus return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldOverrider) DeepCopyInto(out *FieldOverrider) { + *out = *in + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = make([]JSONPatchOperation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.YAML != nil { + in, out := &in.YAML, &out.YAML + *out = make([]YAMLPatchOperation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldOverrider. +func (in *FieldOverrider) DeepCopy() *FieldOverrider { + if in == nil { + return nil + } + out := new(FieldOverrider) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FieldSelector) DeepCopyInto(out *FieldSelector) { *out = *in @@ -513,6 +543,23 @@ func (in *ImagePredicate) DeepCopy() *ImagePredicate { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONPatchOperation) DeepCopyInto(out *JSONPatchOperation) { + *out = *in + in.Value.DeepCopyInto(&out.Value) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatchOperation. +func (in *JSONPatchOperation) DeepCopy() *JSONPatchOperation { + if in == nil { + return nil + } + out := new(JSONPatchOperation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LabelAnnotationOverrider) DeepCopyInto(out *LabelAnnotationOverrider) { *out = *in @@ -677,6 +724,13 @@ func (in *Overriders) DeepCopyInto(out *Overriders) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.FieldOverrider != nil { + in, out := &in.FieldOverrider, &out.FieldOverrider + *out = make([]FieldOverrider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -843,6 +897,11 @@ func (in *PropagationSpec) DeepCopyInto(out *PropagationSpec) { *out = new(Suspension) (*in).DeepCopyInto(*out) } + if in.PreserveResourcesOnDeletion != nil { + in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion + *out = new(bool) + **out = **in + } return } @@ -1022,3 +1081,20 @@ func (in *Suspension) DeepCopy() *Suspension { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YAMLPatchOperation) DeepCopyInto(out *YAMLPatchOperation) { + *out = *in + in.Value.DeepCopyInto(&out.Value) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YAMLPatchOperation. 
+func (in *YAMLPatchOperation) DeepCopy() *YAMLPatchOperation { + if in == nil { + return nil + } + out := new(YAMLPatchOperation) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/apis/work/v1alpha1/work_types.go b/pkg/apis/work/v1alpha1/work_types.go index f1e03ee3e79c..4f136df06648 100644 --- a/pkg/apis/work/v1alpha1/work_types.go +++ b/pkg/apis/work/v1alpha1/work_types.go @@ -60,9 +60,17 @@ type WorkSpec struct { // SuspendDispatching controls whether dispatching should // be suspended, nil means not suspend. - // Note: true means stop propagating to all clusters. + // Note: true means stop propagating to the corresponding member cluster, and + // does not prevent status collection. // +optional SuspendDispatching *bool `json:"suspendDispatching,omitempty"` + + // PreserveResourcesOnDeletion controls whether resources should be preserved on the + // member cluster when the Work object is deleted. + // If set to true, resources will be preserved on the member cluster. + // Default is false, which means resources will be deleted along with the Work object. + // +optional + PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"` } // WorkloadTemplate represents the manifest workload to be deployed on managed cluster. diff --git a/pkg/apis/work/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/work/v1alpha1/zz_generated.deepcopy.go index 824f379202d6..1adbecfdf79d 100644 --- a/pkg/apis/work/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/work/v1alpha1/zz_generated.deepcopy.go @@ -386,6 +386,11 @@ func (in *WorkSpec) DeepCopyInto(out *WorkSpec) { *out = new(bool) **out = **in } + if in.PreserveResourcesOnDeletion != nil { + in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion + *out = new(bool) + **out = **in + } return } diff --git a/pkg/apis/work/v1alpha2/binding_types.go b/pkg/apis/work/v1alpha2/binding_types.go index 25952770af22..7797fb3c60fb 100644 --- a/pkg/apis/work/v1alpha2/binding_types.go +++ b/pkg/apis/work/v1alpha2/binding_types.go @@ -151,6 +151,14 @@ type ResourceBindingSpec struct { // nil means no suspension. no default values. // +optional Suspension *policyv1alpha1.Suspension `json:"suspension,omitempty"` + + // PreserveResourcesOnDeletion controls whether resources should be preserved on the + // member clusters when the binding object is deleted. + // If set to true, resources will be preserved on the member clusters. + // Default is false, which means resources will be deleted along with the binding object. + // This setting applies to all Work objects created under this binding object. + // +optional + PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"` } // ObjectReference contains enough information to locate the referenced object inside current cluster. 
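As a sketch of how the new flag surfaces to users, a PropagationPolicy could set preserveResourcesOnDeletion as shown below; the value then flows down to the ResourceBinding and Work objects through the preserveResourcesOnDeletion fields added in this patch. The policy name, workload, and placement are hypothetical.

apiVersion: policy.karmada.io/v1alpha1
kind: PropagationPolicy
metadata:
  name: nginx-propagation             # hypothetical policy name
  namespace: default
spec:
  preserveResourcesOnDeletion: true   # keep propagated resources on member clusters after the resource template is deleted
  resourceSelectors:
    - apiVersion: apps/v1
      kind: Deployment
      name: nginx                     # hypothetical workload
  placement:
    clusterAffinity:
      clusterNames:
        - member1
        - member2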
diff --git a/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go index 826fea036ee4..824a7d4f5a08 100644 --- a/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go @@ -353,6 +353,11 @@ func (in *ResourceBindingSpec) DeepCopyInto(out *ResourceBindingSpec) { *out = new(v1alpha1.Suspension) (*in).DeepCopyInto(*out) } + if in.PreserveResourcesOnDeletion != nil { + in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion + *out = new(bool) + **out = **in + } return } diff --git a/pkg/controllers/multiclusterservice/endpointslice_collect_controller_test.go b/pkg/controllers/multiclusterservice/endpointslice_collect_controller_test.go new file mode 100644 index 000000000000..011f8e4ac4d8 --- /dev/null +++ b/pkg/controllers/multiclusterservice/endpointslice_collect_controller_test.go @@ -0,0 +1,387 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multiclusterservice + +import ( + "context" + "reflect" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + discoveryv1 "k8s.io/api/discovery/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" +) + +func TestGetEventHandler(t *testing.T) { + testCases := []struct { + name string + clusterName string + existingHandler bool + }{ + { + name: "New handler", + clusterName: "cluster1", + existingHandler: false, + }, + { + name: "Existing handler", + clusterName: "cluster2", + existingHandler: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + controller := &EndpointSliceCollectController{ + eventHandlers: sync.Map{}, + worker: &mockAsyncWorker{}, + } + if tc.existingHandler { + controller.eventHandlers.Store(tc.clusterName, &mockResourceEventHandler{}) + } + handler := controller.getEventHandler(tc.clusterName) + assert.NotNil(t, handler, "Handler should not be nil") + storedHandler, exists := controller.eventHandlers.Load(tc.clusterName) + assert.True(t, exists, "Handler should be stored in eventHandlers") + assert.Equal(t, handler, storedHandler, "Stored handler should match returned handler") + if !tc.existingHandler { + assert.IsType(t, &cache.ResourceEventHandlerFuncs{}, handler, "New handler should be of type *cache.ResourceEventHandlerFuncs") + } else { + assert.IsType(t, &mockResourceEventHandler{}, handler, "Existing handler should be of type *mockResourceEventHandler") + } + }) + } +} + +func TestGenHandlerFuncs(t *testing.T) { + clusterName := "test-cluster" + testObj := 
createTestEndpointSlice("test-object", "test-namespace") + + t.Run("AddFunc", func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + controller := &EndpointSliceCollectController{ + worker: mockWorker, + } + addFunc := controller.genHandlerAddFunc(clusterName) + addFunc(testObj) + assert.Equal(t, 1, mockWorker.addCount, "Add function should be called once") + }) + + t.Run("UpdateFunc", func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + controller := &EndpointSliceCollectController{ + worker: mockWorker, + } + updateFunc := controller.genHandlerUpdateFunc(clusterName) + newObj := createTestEndpointSlice("test-object", "test-namespace") + newObj.SetLabels(map[string]string{"new-label": "new-value"}) + + updateFunc(testObj, newObj) + assert.Equal(t, 1, mockWorker.addCount, "Update function should be called once when objects are different") + + updateFunc(testObj, testObj) + assert.Equal(t, 1, mockWorker.addCount, "Update function should not be called when objects are the same") + }) + + t.Run("DeleteFunc", func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + controller := &EndpointSliceCollectController{ + worker: mockWorker, + } + deleteFunc := controller.genHandlerDeleteFunc(clusterName) + deleteFunc(testObj) + assert.Equal(t, 1, mockWorker.addCount, "Delete function should be called once") + + deletedObj := cache.DeletedFinalStateUnknown{Obj: testObj} + deleteFunc(deletedObj) + assert.Equal(t, 2, mockWorker.addCount, "Delete function should be called for DeletedFinalStateUnknown") + }) +} + +func TestGetEndpointSliceWorkMeta(t *testing.T) { + testCases := []struct { + name string + existingWork *workv1alpha1.Work + endpointSlice *unstructured.Unstructured + expectedMeta metav1.ObjectMeta + expectedError bool + }{ + { + name: "New work for EndpointSlice", + endpointSlice: createEndpointSliceForTest("test-eps", "default", "test-service", false), + expectedMeta: metav1.ObjectMeta{ + Name: "endpointslice-test-eps-default", + Namespace: "test-cluster", + Labels: map[string]string{ + util.MultiClusterServiceNamespaceLabel: "default", + util.MultiClusterServiceNameLabel: "test-service", + util.PropagationInstruction: util.PropagationInstructionSuppressed, + util.EndpointSliceWorkManagedByLabel: util.MultiClusterServiceKind, + }, + }, + }, + { + name: "Existing work for EndpointSlice", + existingWork: createExistingWork("endpointslice-test-eps-default", "test-cluster", "ExistingController"), + endpointSlice: createEndpointSliceForTest("test-eps", "default", "test-service", false), + expectedMeta: metav1.ObjectMeta{ + Name: "endpointslice-test-eps-default", + Namespace: "test-cluster", + Labels: map[string]string{ + util.MultiClusterServiceNamespaceLabel: "default", + util.MultiClusterServiceNameLabel: "test-service", + util.PropagationInstruction: util.PropagationInstructionSuppressed, + util.EndpointSliceWorkManagedByLabel: "ExistingController.MultiClusterService", + }, + Finalizers: []string{util.MCSEndpointSliceDispatchControllerFinalizer}, + }, + }, + { + name: "Nil EndpointSlice", + endpointSlice: nil, + expectedError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + fakeClient := createFakeClient(tc.existingWork) + testFunc := func() (metav1.ObjectMeta, error) { + return getEndpointSliceWorkMeta(context.TODO(), fakeClient, "test-cluster", "endpointslice-test-eps-default", tc.endpointSlice) + } + if tc.expectedError { + assert.Panics(t, func() { + _, err := testFunc() + require.Error(t, err) + }, "Expected a panic for nil EndpointSlice") 
+ } else { + meta, err := testFunc() + require.NoError(t, err) + assert.Equal(t, tc.expectedMeta.Name, meta.Name) + assert.Equal(t, tc.expectedMeta.Namespace, meta.Namespace) + assert.Equal(t, tc.expectedMeta.Finalizers, meta.Finalizers) + assert.True(t, compareLabels(meta.Labels, tc.expectedMeta.Labels), + "Labels do not match. Expected: %v, Got: %v", tc.expectedMeta.Labels, meta.Labels) + } + }) + } +} + +func TestCleanProviderClustersEndpointSliceWork(t *testing.T) { + testCases := []struct { + name string + existingWork *workv1alpha1.Work + expectedWork *workv1alpha1.Work + expectedDelete bool + }{ + { + name: "Work managed by multiple controllers", + existingWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "test-cluster", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-service", + util.MultiClusterServiceNamespaceLabel: "default", + util.EndpointSliceWorkManagedByLabel: "MultiClusterService.OtherController", + }, + }, + }, + expectedWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "test-cluster", + Labels: map[string]string{ + util.EndpointSliceWorkManagedByLabel: "OtherController", + }, + }, + }, + expectedDelete: false, + }, + { + name: "Work managed only by MultiClusterService", + existingWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "test-cluster", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-service", + util.MultiClusterServiceNamespaceLabel: "default", + util.EndpointSliceWorkManagedByLabel: "MultiClusterService", + }, + }, + }, + expectedWork: nil, + expectedDelete: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + scheme := setupSchemeEndpointCollect() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.existingWork).Build() + err := cleanProviderClustersEndpointSliceWork(context.TODO(), fakeClient, tc.existingWork) + assert.NoError(t, err, "Unexpected error in cleanProviderClustersEndpointSliceWork") + + if tc.expectedDelete { + err = fakeClient.Get(context.TODO(), types.NamespacedName{Name: tc.existingWork.Name, Namespace: tc.existingWork.Namespace}, &workv1alpha1.Work{}) + assert.Error(t, err, "Expected Work to be deleted, but it still exists") + assert.True(t, apierrors.IsNotFound(err), "Expected NotFound error, got %v", err) + } else { + updatedWork := &workv1alpha1.Work{} + err = fakeClient.Get(context.TODO(), types.NamespacedName{Name: tc.existingWork.Name, Namespace: tc.existingWork.Namespace}, updatedWork) + assert.NoError(t, err, "Failed to get updated Work") + assert.True(t, compareLabels(updatedWork.Labels, tc.expectedWork.Labels), + "Labels mismatch. 
Expected %v, but got %v", tc.expectedWork.Labels, updatedWork.Labels) + } + }) + } +} + +// Helper Functions + +// Helper function to set up a scheme for EndpointSlice collection tests +func setupSchemeEndpointCollect() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = workv1alpha1.Install(scheme) + _ = discoveryv1.AddToScheme(scheme) + return scheme +} + +// Helper function to create a test EndpointSlice +func createTestEndpointSlice(name, namespace string) *unstructured.Unstructured { + endpointSlice := &discoveryv1.EndpointSlice{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "discovery.k8s.io/v1", + Kind: "EndpointSlice", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + unstructuredObj, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(endpointSlice) + return &unstructured.Unstructured{Object: unstructuredObj} +} + +// Helper function to create an EndpointSlice for testing with specific properties +func createEndpointSliceForTest(name, namespace, serviceName string, isManaged bool) *unstructured.Unstructured { + labels := map[string]interface{}{ + discoveryv1.LabelServiceName: serviceName, + } + if isManaged { + labels[discoveryv1.LabelManagedBy] = util.EndpointSliceDispatchControllerLabelValue + } + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "discovery.k8s.io/v1", + "kind": "EndpointSlice", + "metadata": map[string]interface{}{ + "name": name, + "namespace": namespace, + "labels": labels, + }, + }, + } +} + +// Helper function to create an existing Work resource for testing +func createExistingWork(name, namespace, managedBy string) *workv1alpha1.Work { + return &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + util.EndpointSliceWorkManagedByLabel: managedBy, + }, + }, + } +} + +// Helper function to create a fake client with an optional existing Work +func createFakeClient(existingWork *workv1alpha1.Work) client.Client { + scheme := setupSchemeEndpointCollect() + objs := []client.Object{} + if existingWork != nil { + objs = append(objs, existingWork) + } + return fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build() +} + +// Helper function to compare two label maps, considering special handling for EndpointSliceWorkManagedByLabel +func compareLabels(actual, expected map[string]string) bool { + if len(actual) != len(expected) { + return false + } + for k, v := range expected { + actualV, exists := actual[k] + if !exists { + return false + } + if k == util.EndpointSliceWorkManagedByLabel { + actualParts := strings.Split(actualV, ".") + expectedParts := strings.Split(v, ".") + sort.Strings(actualParts) + sort.Strings(expectedParts) + if !reflect.DeepEqual(actualParts, expectedParts) { + return false + } + } else if actualV != v { + return false + } + } + return true +} + +// Mock implementations + +type mockAsyncWorker struct { + addCount int +} + +func (m *mockAsyncWorker) Add(_ interface{}) { + m.addCount++ +} + +func (m *mockAsyncWorker) AddAfter(_ interface{}, _ time.Duration) {} + +func (m *mockAsyncWorker) Enqueue(_ interface{}) {} + +func (m *mockAsyncWorker) Run(_ int, _ <-chan struct{}) {} + +type mockResourceEventHandler struct{} + +func (m *mockResourceEventHandler) OnAdd(_ interface{}, _ bool) {} + +func (m *mockResourceEventHandler) OnUpdate(_, _ interface{}) {} + +func (m *mockResourceEventHandler) OnDelete(_ interface{}) {} diff --git 
a/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller_test.go b/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller_test.go new file mode 100644 index 000000000000..ec12fff7942a --- /dev/null +++ b/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller_test.go @@ -0,0 +1,905 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multiclusterservice + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + discoveryv1 "k8s.io/api/discovery/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" +) + +func TestUpdateEndpointSliceDispatched(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + status metav1.ConditionStatus + reason string + message string + expectedCondition metav1.Condition + }{ + { + name: "update status to true", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + status: metav1.ConditionTrue, + reason: "EndpointSliceDispatchedSucceed", + message: "EndpointSlice are dispatched successfully", + expectedCondition: metav1.Condition{ + Type: networkingv1alpha1.EndpointSliceDispatched, + Status: metav1.ConditionTrue, + Reason: "EndpointSliceDispatchedSucceed", + Message: "EndpointSlice are dispatched successfully", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := new(MockClient) + mockStatusWriter := new(MockStatusWriter) + + // Expectations Setup + mockClient.On("Status").Return(mockStatusWriter) + mockClient.On("Get", mock.Anything, mock.AnythingOfType("types.NamespacedName"), mock.AnythingOfType("*v1alpha1.MultiClusterService"), mock.Anything). + Run(func(args mock.Arguments) { + arg := args.Get(2).(*networkingv1alpha1.MultiClusterService) + *arg = *tt.mcs // Copy the input MCS to the output + }).Return(nil) + + mockStatusWriter.On("Update", mock.Anything, mock.AnythingOfType("*v1alpha1.MultiClusterService"), mock.Anything). 
+ Run(func(args mock.Arguments) { + mcs := args.Get(1).(*networkingv1alpha1.MultiClusterService) + mcs.Status.Conditions = []metav1.Condition{tt.expectedCondition} + }).Return(nil) + + c := &EndpointsliceDispatchController{ + Client: mockClient, + EventRecorder: record.NewFakeRecorder(100), + } + + err := c.updateEndpointSliceDispatched(context.Background(), tt.mcs, tt.status, tt.reason, tt.message) + assert.NoError(t, err, "updateEndpointSliceDispatched should not return an error") + + mockClient.AssertExpectations(t) + mockStatusWriter.AssertExpectations(t) + + assert.Len(t, tt.mcs.Status.Conditions, 1, "MCS should have one condition") + if len(tt.mcs.Status.Conditions) > 0 { + condition := tt.mcs.Status.Conditions[0] + assert.Equal(t, tt.expectedCondition.Type, condition.Type) + assert.Equal(t, tt.expectedCondition.Status, condition.Status) + assert.Equal(t, tt.expectedCondition.Reason, condition.Reason) + assert.Equal(t, tt.expectedCondition.Message, condition.Message) + } + }) + } +} + +func TestNewClusterFunc(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + inputObj client.Object + expectedResult []reconcile.Request + }{ + { + name: "new cluster, matching MCS", + existingObjs: []client.Object{ + &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster1"}, + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + inputObj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedResult: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Namespace: "karmada-es-cluster1", Name: "test-work"}}, + }, + }, + { + name: "new cluster, no matching MCS", + existingObjs: []client.Object{ + &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster2"}, + }, + }, + }, + }, + inputObj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedResult: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupController(tt.existingObjs...) 
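+			// The handler returned by newClusterFunc should map a newly-joined Cluster to the
+			// EndpointSlice Works in that cluster's execution namespace, and yield nothing when
+			// no MultiClusterService lists the cluster as a consumer.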
+ result := c.newClusterFunc()(context.Background(), tt.inputObj) + assert.Equal(t, tt.expectedResult, result) + }) + } +} + +func TestGetClusterEndpointSliceWorks(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + mcsNamespace string + mcsName string + expectedWorks int + expectedError bool + listError error + }{ + { + name: "find matching works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work2", + Namespace: "karmada-es-cluster2", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work3", + Namespace: "karmada-es-cluster3", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "other-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + mcsNamespace: "default", + mcsName: "test-mcs", + expectedWorks: 2, + expectedError: false, + }, + { + name: "no matching works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "other-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + mcsNamespace: "default", + mcsName: "test-mcs", + expectedWorks: 0, + expectedError: false, + }, + { + name: "works in different namespace", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "test-namespace", + }, + }, + }, + }, + mcsNamespace: "test-namespace", + mcsName: "test-mcs", + expectedWorks: 1, + expectedError: false, + }, + { + name: "list error", + existingObjs: []client.Object{}, + mcsNamespace: "default", + mcsName: "test-mcs", + expectedWorks: 0, + expectedError: true, + listError: errors.New("fake list error"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupController(tt.existingObjs...) 
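+			// Wrap the fake client so the "list error" case can simulate a failing List call.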
+ if tt.listError != nil { + c.Client = &fakeClient{Client: c.Client, listError: tt.listError} + } + works, err := c.getClusterEndpointSliceWorks(context.Background(), tt.mcsNamespace, tt.mcsName) + if tt.expectedError { + assert.Error(t, err) + assert.Nil(t, works) + } else { + assert.NoError(t, err) + assert.Len(t, works, tt.expectedWorks) + for _, work := range works { + assert.Equal(t, tt.mcsName, work.Labels[util.MultiClusterServiceNameLabel]) + assert.Equal(t, tt.mcsNamespace, work.Labels[util.MultiClusterServiceNamespaceLabel]) + } + } + }) + } +} + +func TestNewMultiClusterServiceFunc(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + inputObj client.Object + expectedResult []reconcile.Request + }{ + { + name: "MCS with matching works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work-1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work-2", + Namespace: "karmada-es-cluster2", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "cluster2", + }, + }, + }, + }, + inputObj: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + expectedResult: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Namespace: "karmada-es-cluster1", Name: "test-work-1"}}, + }, + }, + { + name: "MCS with no matching works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "other-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + inputObj: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + expectedResult: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupController(tt.existingObjs...) 
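+			// Works carrying the provision-cluster annotation, or labeled for a different MCS,
+			// are expected to be filtered out of the resulting reconcile requests.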
+ result := c.newMultiClusterServiceFunc()(context.Background(), tt.inputObj) + assert.Equal(t, tt.expectedResult, result) + }) + } +} + +func TestCleanOrphanDispatchedEndpointSlice(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + mcs *networkingv1alpha1.MultiClusterService + expectedDeletes int + expectedError bool + }{ + { + name: "clean orphan works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "provider", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work2", + Namespace: "karmada-es-cluster2", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "provider", + }, + }, + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster1"}, + }, + }, + }, + expectedDeletes: 1, + expectedError: false, + }, + { + name: "no orphan works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "provider", + }, + }, + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster1"}, + }, + }, + }, + expectedDeletes: 0, + expectedError: false, + }, + { + name: "work without provision cluster annotation", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster2"}, + }, + }, + }, + expectedDeletes: 0, + expectedError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupSchemeEndpointDispatch() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.existingObjs...).Build() + c := &EndpointsliceDispatchController{ + Client: fakeClient, + } + err := c.cleanOrphanDispatchedEndpointSlice(context.Background(), tt.mcs) + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + // Check if the expected number of works were deleted + remainingWorks := &workv1alpha1.WorkList{} + err = fakeClient.List(context.Background(), remainingWorks, &client.ListOptions{}) + assert.NoError(t, err) + 
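+				// Only Works dispatched to clusters that are no longer consumers count as orphans;
+				// the remaining Works must be left untouched.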
assert.Len(t, remainingWorks.Items, len(tt.existingObjs)-tt.expectedDeletes) + } + }) + } +} + +func TestEnsureEndpointSliceWork(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + work *workv1alpha1.Work + providerCluster string + consumerCluster string + expectedError bool + expectedWork *workv1alpha1.Work + }{ + { + name: "create new work", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + work: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-provider", + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{ + "apiVersion": "discovery.k8s.io/v1", + "kind": "EndpointSlice", + "metadata": { + "name": "test-eps" + }, + "endpoints": [ + { + "addresses": ["10.0.0.1"] + } + ], + "ports": [ + { + "port": 80 + } + ] + }`), + }, + }, + }, + }, + }, + }, + providerCluster: "provider", + consumerCluster: "consumer", + expectedError: false, + expectedWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-consumer", + Finalizers: []string{util.ExecutionControllerFinalizer}, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "provider", + }, + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{ + "apiVersion": "discovery.k8s.io/v1", + "kind": "EndpointSlice", + "metadata": { + "name": "provider-test-eps", + "labels": { + "kubernetes.io/service-name": "test-mcs", + "endpointslice.kubernetes.io/managed-by": "endpointslice-dispatch-controller.karmada.io", + "karmada.io/managed": "true" + }, + "annotations": { + "endpointslice.karmada.io/provision-cluster": "provider", + "work.karmada.io/name": "test-work", + "work.karmada.io/namespace": "karmada-es-consumer", + "resourcetemplate.karmada.io/uid": "" + } + }, + "endpoints": [ + { + "addresses": ["10.0.0.1"] + } + ], + "ports": [ + { + "port": 80 + } + ] + }`), + }, + }, + }, + }, + }, + }, + }, + { + name: "empty manifest", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + work: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-provider", + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{}, + }, + }, + }, + providerCluster: "provider", + consumerCluster: "consumer", + expectedError: false, + expectedWork: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupSchemeEndpointDispatch() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + c := &EndpointsliceDispatchController{ + Client: fakeClient, + } + + err := c.ensureEndpointSliceWork(context.Background(), tt.mcs, tt.work, tt.providerCluster, tt.consumerCluster) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + if tt.expectedWork != nil { + createdWork := &workv1alpha1.Work{} + err = fakeClient.Get(context.Background(), types.NamespacedName{ + Name: tt.expectedWork.Name, + 
Namespace: tt.expectedWork.Namespace, + }, createdWork) + assert.NoError(t, err) + + assert.Equal(t, tt.expectedWork.ObjectMeta.Name, createdWork.ObjectMeta.Name) + assert.Equal(t, tt.expectedWork.ObjectMeta.Namespace, createdWork.ObjectMeta.Namespace) + assert.Equal(t, tt.expectedWork.ObjectMeta.Finalizers, createdWork.ObjectMeta.Finalizers) + assert.Equal(t, tt.expectedWork.ObjectMeta.Annotations, createdWork.ObjectMeta.Annotations) + assert.Equal(t, tt.expectedWork.ObjectMeta.Labels, createdWork.ObjectMeta.Labels) + + // Comparing manifests + assert.Equal(t, len(tt.expectedWork.Spec.Workload.Manifests), len(createdWork.Spec.Workload.Manifests)) + if len(tt.expectedWork.Spec.Workload.Manifests) > 0 { + expectedManifest := &unstructured.Unstructured{} + createdManifest := &unstructured.Unstructured{} + + err = expectedManifest.UnmarshalJSON(tt.expectedWork.Spec.Workload.Manifests[0].Raw) + assert.NoError(t, err) + err = createdManifest.UnmarshalJSON(createdWork.Spec.Workload.Manifests[0].Raw) + assert.NoError(t, err) + + assert.Equal(t, expectedManifest.GetName(), createdManifest.GetName()) + assert.Equal(t, expectedManifest.GetLabels(), createdManifest.GetLabels()) + assert.Equal(t, expectedManifest.GetAnnotations(), createdManifest.GetAnnotations()) + } + } else { + workList := &workv1alpha1.WorkList{} + err = fakeClient.List(context.Background(), workList) + assert.NoError(t, err) + assert.Empty(t, workList.Items) + } + } + }) + } +} + +func TestCleanupEndpointSliceFromConsumerClusters(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + inputWork *workv1alpha1.Work + expectedErr bool + }{ + { + name: "cleanup works in consumer clusters", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work-1", + Namespace: "karmada-es-cluster1", + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "cluster1", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work-2", + Namespace: "karmada-es-cluster2", + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "cluster1", + }, + }, + }, + }, + inputWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-cluster1", + Finalizers: []string{ + util.MCSEndpointSliceDispatchControllerFinalizer, + }, + }, + }, + expectedErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupSchemeEndpointDispatch() + c := &EndpointsliceDispatchController{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(append(tt.existingObjs, tt.inputWork)...).Build(), + } + + err := c.cleanupEndpointSliceFromConsumerClusters(context.Background(), tt.inputWork) + if tt.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + // Check if works are deleted + for _, obj := range tt.existingObjs { + work := obj.(*workv1alpha1.Work) + err := c.Client.Get(context.Background(), types.NamespacedName{Namespace: work.Namespace, Name: work.Name}, &workv1alpha1.Work{}) + assert.True(t, client.IgnoreNotFound(err) == nil) + } + + // Check if the finalizer is removed + updatedWork := &workv1alpha1.Work{} + err := c.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.inputWork.Namespace, Name: tt.inputWork.Name}, updatedWork) + assert.NoError(t, err) + assert.NotContains(t, updatedWork.Finalizers, util.MCSEndpointSliceDispatchControllerFinalizer) + } + }) + } +} + +// Helper Functions + 
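+// The helpers below keep these tests self-contained: setupSchemeEndpointDispatch registers the
+// Karmada work/networking/cluster APIs together with discovery.k8s.io, and setupController wires
+// a fake controller-runtime client to an EndpointsliceDispatchController with a fake recorder.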
+// Helper function to create and configure a runtime scheme for the controller +func setupSchemeEndpointDispatch() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = networkingv1alpha1.Install(scheme) + _ = workv1alpha1.Install(scheme) + _ = clusterv1alpha1.Install(scheme) + _ = discoveryv1.AddToScheme(scheme) + return scheme +} + +// Helper function to create a new EndpointsliceDispatchController with a fake client for testing +func setupController(objs ...client.Object) *EndpointsliceDispatchController { + scheme := setupSchemeEndpointDispatch() + return &EndpointsliceDispatchController{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build(), + EventRecorder: record.NewFakeRecorder(100), + } +} + +// Mock implementations + +// MockClient is a mock of client.Client interface +type MockClient struct { + mock.Mock +} + +func (m *MockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + args := m.Called(ctx, key, obj, opts) + return args.Error(0) +} + +func (m *MockClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + args := m.Called(ctx, list, opts) + return args.Error(0) +} + +func (m *MockClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + args := m.Called(ctx, obj, patch, opts) + return args.Error(0) +} + +func (m *MockClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Status() client.StatusWriter { + args := m.Called() + return args.Get(0).(client.StatusWriter) +} + +func (m *MockClient) SubResource(subResource string) client.SubResourceClient { + args := m.Called(subResource) + return args.Get(0).(client.SubResourceClient) +} + +func (m *MockClient) Scheme() *runtime.Scheme { + args := m.Called() + return args.Get(0).(*runtime.Scheme) +} + +func (m *MockClient) RESTMapper() meta.RESTMapper { + args := m.Called() + return args.Get(0).(meta.RESTMapper) +} + +func (m *MockClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + args := m.Called(obj) + return args.Get(0).(schema.GroupVersionKind), args.Error(1) +} + +func (m *MockClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + args := m.Called(obj) + return args.Bool(0), args.Error(1) +} + +// MockStatusWriter is a mock of client.StatusWriter interface +type MockStatusWriter struct { + mock.Mock +} + +func (m *MockStatusWriter) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + args := m.Called(ctx, obj, subResource, opts) + return args.Error(0) +} + +func (m *MockStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockStatusWriter) Patch(ctx context.Context, obj client.Object, 
patch client.Patch, opts ...client.SubResourcePatchOption) error { + args := m.Called(ctx, obj, patch, opts) + return args.Error(0) +} + +// Custom fake client that can simulate list errors +type fakeClient struct { + client.Client + listError error +} + +func (f *fakeClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + if f.listError != nil { + return f.listError + } + return f.Client.List(ctx, list, opts...) +} diff --git a/pkg/controllers/multiclusterservice/mcs_controller_test.go b/pkg/controllers/multiclusterservice/mcs_controller_test.go new file mode 100644 index 000000000000..738022942574 --- /dev/null +++ b/pkg/controllers/multiclusterservice/mcs_controller_test.go @@ -0,0 +1,1103 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multiclusterservice + +import ( + "context" + "fmt" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/util" + "github.com/karmada-io/karmada/pkg/util/names" +) + +func TestHandleMultiClusterServiceDelete(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + existingService *corev1.Service + existingResourceBinding *workv1alpha2.ResourceBinding + expectedServiceLabels map[string]string + expectedServiceAnnotations map[string]string + expectedRBLabels map[string]string + expectedRBAnnotations map[string]string + }{ + { + name: "Delete MCS and clean up Service and ResourceBinding", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Finalizers: []string{util.MCSControllerFinalizer}, + }, + }, + existingService: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + util.ResourceTemplateClaimedByLabel: util.MultiClusterServiceKind, + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + Annotations: map[string]string{ + networkingv1alpha1.MultiClusterServiceNameAnnotation: "test-mcs", + networkingv1alpha1.MultiClusterServiceNamespaceAnnotation: "default", + }, + }, + }, + existingResourceBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ 
+ Name: "service-test-mcs", + Namespace: "default", + Labels: map[string]string{ + workv1alpha2.BindingManagedByLabel: util.MultiClusterServiceKind, + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + Annotations: map[string]string{ + networkingv1alpha1.MultiClusterServiceNameAnnotation: "test-mcs", + networkingv1alpha1.MultiClusterServiceNamespaceAnnotation: "default", + }, + }, + }, + expectedServiceLabels: nil, + expectedServiceAnnotations: map[string]string{ + networkingv1alpha1.MultiClusterServiceNameAnnotation: "test-mcs", + networkingv1alpha1.MultiClusterServiceNamespaceAnnotation: "default", + }, + expectedRBLabels: map[string]string{ + workv1alpha2.BindingManagedByLabel: util.MultiClusterServiceKind, + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + expectedRBAnnotations: map[string]string{ + networkingv1alpha1.MultiClusterServiceNameAnnotation: "test-mcs", + networkingv1alpha1.MultiClusterServiceNamespaceAnnotation: "default", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := newFakeController(tt.mcs, tt.existingService, tt.existingResourceBinding) + + _, err := controller.handleMultiClusterServiceDelete(context.Background(), tt.mcs) + assert.NoError(t, err) + + updatedService := &corev1.Service{} + err = controller.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.mcs.Namespace, Name: tt.mcs.Name}, updatedService) + assert.NoError(t, err) + + updatedRB := &workv1alpha2.ResourceBinding{} + err = controller.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.mcs.Namespace, Name: "service-" + tt.mcs.Name}, updatedRB) + assert.NoError(t, err) + + assert.Equal(t, tt.expectedServiceLabels, updatedService.Labels) + assert.Equal(t, tt.expectedServiceAnnotations, updatedService.Annotations) + assert.Equal(t, tt.expectedRBLabels, updatedRB.Labels) + assert.Equal(t, tt.expectedRBAnnotations, updatedRB.Annotations) + + updatedMCS := &networkingv1alpha1.MultiClusterService{} + err = controller.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.mcs.Namespace, Name: tt.mcs.Name}, updatedMCS) + assert.NoError(t, err) + assert.NotContains(t, updatedMCS.Finalizers, util.MCSControllerFinalizer) + }) + } +} + +func TestRetrieveMultiClusterService(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + existingWorks []*workv1alpha1.Work + providerClusters sets.Set[string] + clusters []*clusterv1alpha1.Cluster + expectedWorks int + }{ + { + name: "Remove work for non-provider cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + existingWorks: []*workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: names.GenerateWorkName("MultiClusterService", "test-mcs", "default"), + Namespace: names.GenerateExecutionSpaceName("cluster1"), + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{Raw: []byte(`{"apiVersion":"networking.karmada.io/v1alpha1","kind":"MultiClusterService"}`)}, + }, + }, + }, + }, + }, + }, + providerClusters: sets.New("cluster2"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: 
metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + }, + expectedWorks: 0, + }, + { + name: "Keep work for provider cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + existingWorks: []*workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: names.GenerateWorkName("MultiClusterService", "test-mcs", "default"), + Namespace: names.GenerateExecutionSpaceName("cluster1"), + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{Raw: []byte(`{"apiVersion":"networking.karmada.io/v1alpha1","kind":"MultiClusterService"}`)}, + }, + }, + }, + }, + }, + }, + providerClusters: sets.New("cluster1"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + }, + expectedWorks: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objs := []runtime.Object{tt.mcs} + objs = append(objs, toRuntimeObjects(tt.existingWorks)...) + objs = append(objs, toRuntimeObjects(tt.clusters)...) + + controller := newFakeController(objs...) + + err := controller.retrieveMultiClusterService(context.Background(), tt.mcs, tt.providerClusters) + assert.NoError(t, err) + + workList := &workv1alpha1.WorkList{} + err = controller.Client.List(context.Background(), workList) + assert.NoError(t, err) + + assert.Equal(t, tt.expectedWorks, len(workList.Items)) + }) + } +} + +func TestPropagateMultiClusterService(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + providerClusters sets.Set[string] + clusters []*clusterv1alpha1.Cluster + expectedWorks int + }{ + { + name: "Propagate to one ready cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "discovery.k8s.io/v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "EndpointSlice"}, + }, + }, + }, + }, + }, + }, + expectedWorks: 1, + }, + { + name: "No propagation to unready cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + 
Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}, + }, + }, + }, + }, + expectedWorks: 0, + }, + { + name: "No propagation to cluster without EndpointSlice support", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "Pod"}, + }, + }, + }, + }, + }, + }, + expectedWorks: 0, + }, + { + name: "Propagate to multiple ready clusters", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1", "cluster2"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "discovery.k8s.io/v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "EndpointSlice"}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "discovery.k8s.io/v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "EndpointSlice"}, + }, + }, + }, + }, + }, + }, + expectedWorks: 2, + }, + { + name: "Mixed cluster readiness and API support", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1", "cluster2", "cluster3"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "discovery.k8s.io/v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "EndpointSlice"}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: 
"Pod"}, + }, + }, + }, + }, + }, + }, + expectedWorks: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objs := []runtime.Object{tt.mcs} + objs = append(objs, toRuntimeObjects(tt.clusters)...) + + controller := newFakeController(objs...) + + err := controller.propagateMultiClusterService(context.Background(), tt.mcs, tt.providerClusters) + assert.NoError(t, err) + + workList := &workv1alpha1.WorkList{} + err = controller.Client.List(context.Background(), workList) + assert.NoError(t, err) + assert.Equal(t, tt.expectedWorks, len(workList.Items)) + + if tt.expectedWorks > 0 { + for _, work := range workList.Items { + assert.Equal(t, names.GenerateWorkName(tt.mcs.Kind, tt.mcs.Name, tt.mcs.Namespace), work.Name) + clusterName, err := names.GetClusterName(work.Namespace) + assert.NoError(t, err) + assert.Contains(t, tt.providerClusters, clusterName) + assert.Equal(t, "test-id", work.Labels[networkingv1alpha1.MultiClusterServicePermanentIDLabel]) + } + } + }) + } +} + +func TestBuildResourceBinding(t *testing.T) { + tests := []struct { + name string + svc *corev1.Service + mcs *networkingv1alpha1.MultiClusterService + providerClusters sets.Set[string] + consumerClusters sets.Set[string] + }{ + { + name: "Build ResourceBinding with non-overlapping clusters", + svc: &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + UID: "test-uid", + ResourceVersion: "1234", + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1", "cluster2"), + consumerClusters: sets.New("cluster3", "cluster4"), + }, + { + name: "Build ResourceBinding with empty consumer clusters", + svc: &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + UID: "test-uid", + ResourceVersion: "1234", + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1", "cluster2"), + consumerClusters: sets.New[string](), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := newFakeController() + rb, err := controller.buildResourceBinding(tt.svc, tt.mcs, tt.providerClusters, tt.consumerClusters) + + assert.NoError(t, err) + assert.NotNil(t, rb) + + // ObjectMeta Check + assert.Equal(t, names.GenerateBindingName(tt.svc.Kind, tt.svc.Name), rb.Name) + assert.Equal(t, tt.svc.Namespace, rb.Namespace) + + // Annotations Check + assert.Equal(t, tt.mcs.Name, rb.Annotations[networkingv1alpha1.MultiClusterServiceNameAnnotation]) + assert.Equal(t, tt.mcs.Namespace, rb.Annotations[networkingv1alpha1.MultiClusterServiceNamespaceAnnotation]) + + // Labels Check + assert.Equal(t, util.MultiClusterServiceKind, rb.Labels[workv1alpha2.BindingManagedByLabel]) + assert.Equal(t, "test-id", rb.Labels[networkingv1alpha1.MultiClusterServicePermanentIDLabel]) + + // OwnerReferences Check + assert.Len(t, rb.OwnerReferences, 1) + assert.Equal(t, tt.svc.APIVersion, rb.OwnerReferences[0].APIVersion) + assert.Equal(t, 
tt.svc.Kind, rb.OwnerReferences[0].Kind) + assert.Equal(t, tt.svc.Name, rb.OwnerReferences[0].Name) + assert.Equal(t, tt.svc.UID, rb.OwnerReferences[0].UID) + + // Finalizers Check + assert.Contains(t, rb.Finalizers, util.BindingControllerFinalizer) + + // Spec Check + expectedClusters := tt.providerClusters.Union(tt.consumerClusters).UnsortedList() + actualClusters := rb.Spec.Placement.ClusterAffinity.ClusterNames + + // Sort both slices before comparison + sort.Strings(expectedClusters) + sort.Strings(actualClusters) + + assert.Equal(t, expectedClusters, actualClusters, "Cluster names should match regardless of order") + + // Resource reference Check + assert.Equal(t, tt.svc.APIVersion, rb.Spec.Resource.APIVersion) + assert.Equal(t, tt.svc.Kind, rb.Spec.Resource.Kind) + assert.Equal(t, tt.svc.Namespace, rb.Spec.Resource.Namespace) + assert.Equal(t, tt.svc.Name, rb.Spec.Resource.Name) + assert.Equal(t, tt.svc.UID, rb.Spec.Resource.UID) + assert.Equal(t, tt.svc.ResourceVersion, rb.Spec.Resource.ResourceVersion) + }) + } +} + +func TestClaimMultiClusterServiceForService(t *testing.T) { + tests := []struct { + name string + svc *corev1.Service + mcs *networkingv1alpha1.MultiClusterService + updateError bool + expectedError bool + }{ + { + name: "Claim service for MCS - basic case", + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + }, + { + name: "Claim service for MCS - with existing labels and annotations", + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + Labels: map[string]string{ + "existing-label": "value", + policyv1alpha1.PropagationPolicyPermanentIDLabel: "should-be-removed", + }, + Annotations: map[string]string{ + "existing-annotation": "value", + policyv1alpha1.PropagationPolicyNameAnnotation: "should-be-removed", + }, + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + }, + { + name: "Claim service for MCS - update error", + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + updateError: true, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := newFakeController(tt.svc) + if tt.updateError { + controller.Client = newFakeClientWithUpdateError(tt.svc, true) + } + + err := controller.claimMultiClusterServiceForService(context.Background(), tt.svc, tt.mcs) + + if tt.expectedError { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + + updatedSvc := &corev1.Service{} + err = controller.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.svc.Namespace, Name: tt.svc.Name}, updatedSvc) + assert.NoError(t, err) + + // Added labels and annotations check + assert.Equal(t, util.MultiClusterServiceKind, 
updatedSvc.Labels[util.ResourceTemplateClaimedByLabel]) + assert.Equal(t, "test-id", updatedSvc.Labels[networkingv1alpha1.MultiClusterServicePermanentIDLabel]) + assert.Equal(t, tt.mcs.Name, updatedSvc.Annotations[networkingv1alpha1.MultiClusterServiceNameAnnotation]) + assert.Equal(t, tt.mcs.Namespace, updatedSvc.Annotations[networkingv1alpha1.MultiClusterServiceNamespaceAnnotation]) + + // Removed labels and annotations check + assert.NotContains(t, updatedSvc.Labels, policyv1alpha1.PropagationPolicyPermanentIDLabel) + assert.NotContains(t, updatedSvc.Labels, policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel) + assert.NotContains(t, updatedSvc.Annotations, policyv1alpha1.PropagationPolicyNameAnnotation) + assert.NotContains(t, updatedSvc.Annotations, policyv1alpha1.PropagationPolicyNamespaceAnnotation) + assert.NotContains(t, updatedSvc.Annotations, policyv1alpha1.ClusterPropagationPolicyAnnotation) + + // Check existing labels and annotations are preserved + if tt.svc.Labels != nil { + assert.Contains(t, updatedSvc.Labels, "existing-label") + } + if tt.svc.Annotations != nil { + assert.Contains(t, updatedSvc.Annotations, "existing-annotation") + } + }) + } +} + +func TestIsClusterReady(t *testing.T) { + tests := []struct { + name string + clusterName string + clusterObj *clusterv1alpha1.Cluster + expectedReady bool + }{ + { + name: "cluster is ready", + clusterName: "ready-cluster", + clusterObj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ready-cluster", + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + expectedReady: true, + }, + { + name: "cluster is not ready", + clusterName: "not-ready-cluster", + clusterObj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "not-ready-cluster", + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + expectedReady: false, + }, + { + name: "cluster does not exist", + clusterName: "non-existent-cluster", + clusterObj: nil, + expectedReady: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var objects []runtime.Object + if tt.clusterObj != nil { + objects = append(objects, tt.clusterObj) + } + + controller := newFakeController(objects...) 
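+			// A Cluster object that cannot be fetched is expected to be treated the same as a
+			// cluster whose Ready condition is false.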
+ + ready := controller.IsClusterReady(context.Background(), tt.clusterName) + assert.Equal(t, tt.expectedReady, ready, "IsClusterReady() result does not match expected") + }) + } +} + +func TestServiceHasCrossClusterMultiClusterService(t *testing.T) { + tests := []struct { + name string + svc *corev1.Service + mcs *networkingv1alpha1.MultiClusterService + expected bool + }{ + { + name: "Service has cross-cluster MCS", + svc: &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}}, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + }, + }, + expected: true, + }, + { + name: "Service has no cross-cluster MCS", + svc: &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}}, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("LocalCluster")}, + }, + }, + expected: false, + }, + { + name: "Service has no MCS", + svc: &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}}, + mcs: nil, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objs := []runtime.Object{tt.svc} + if tt.mcs != nil { + objs = append(objs, tt.mcs) + } + + controller := newFakeController(objs...) + + result := controller.serviceHasCrossClusterMultiClusterService(tt.svc) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestClusterMapFunc(t *testing.T) { + tests := []struct { + name string + object client.Object + mcsList []*networkingv1alpha1.MultiClusterService + expectedRequests []reconcile.Request + }{ + { + name: "Cluster matches MCS provider", + object: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + mcsList: []*networkingv1alpha1.MultiClusterService{ + { + ObjectMeta: metav1.ObjectMeta{Name: "mcs1", Namespace: "default"}, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "mcs2", Namespace: "default"}, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster2"}}, + }, + }, + }, + expectedRequests: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Namespace: "default", Name: "mcs1"}}, + {NamespacedName: types.NamespacedName{Namespace: "default", Name: "mcs2"}}, + }, + }, + { + name: "Cluster doesn't match any MCS", + object: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}}, + mcsList: []*networkingv1alpha1.MultiClusterService{ + { + ObjectMeta: metav1.ObjectMeta{Name: "mcs1", Namespace: "default"}, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + }, + }, + }, + expectedRequests: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Namespace: 
"default", Name: "mcs1"}}, + }, + }, + { + name: "Empty MCS list", + object: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + mcsList: []*networkingv1alpha1.MultiClusterService{}, + expectedRequests: []reconcile.Request{}, + }, + { + name: "Non-Cluster object", + object: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}}, + mcsList: []*networkingv1alpha1.MultiClusterService{}, + expectedRequests: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objs := []runtime.Object{tt.object} + objs = append(objs, toRuntimeObjects(tt.mcsList)...) + + controller := newFakeController(objs...) + mapFunc := controller.clusterMapFunc() + + requests := mapFunc(context.Background(), tt.object) + + assert.Equal(t, len(tt.expectedRequests), len(requests), "Number of requests does not match expected") + assert.ElementsMatch(t, tt.expectedRequests, requests, "Requests do not match expected") + + if _, ok := tt.object.(*clusterv1alpha1.Cluster); ok { + for _, request := range requests { + found := false + for _, mcs := range tt.mcsList { + if mcs.Name == request.Name && mcs.Namespace == request.Namespace { + found = true + break + } + } + assert.True(t, found, "Generated request does not correspond to any MCS in the list") + } + } + }) + } +} + +func TestNeedSyncMultiClusterService(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + clusterName string + expectedNeed bool + expectedErr bool + }{ + { + name: "MCS with CrossCluster type and matching provider cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + ConsumerClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster2"}}, + }, + }, + clusterName: "cluster1", + expectedNeed: true, + expectedErr: false, + }, + { + name: "MCS with CrossCluster type and matching consumer cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + ConsumerClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster2"}}, + }, + }, + clusterName: "cluster2", + expectedNeed: true, + expectedErr: false, + }, + { + name: "MCS without CrossCluster type", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("LocalCluster")}, + }, + }, + clusterName: "cluster1", + expectedNeed: false, + expectedErr: false, + }, + { + name: "MCS with empty ProviderClusters and ConsumerClusters", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + }, + }, + clusterName: "cluster1", + expectedNeed: true, + expectedErr: false, + }, + { + name: "Cluster doesn't match ProviderClusters or ConsumerClusters", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + 
ConsumerClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster2"}}, + }, + }, + clusterName: "cluster3", + expectedNeed: false, + expectedErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + clusters := createClustersFromMCS(tt.mcs) + objs := append([]runtime.Object{tt.mcs}, toRuntimeObjects(clusters)...) + + controller := newFakeController(objs...) + need, err := controller.needSyncMultiClusterService(tt.mcs, tt.clusterName) + + assert.Equal(t, tt.expectedNeed, need, "Expected need %v, but got %v", tt.expectedNeed, need) + if tt.expectedErr { + assert.Error(t, err, "Expected an error, but got none") + } else { + assert.NoError(t, err, "Expected no error, but got %v", err) + } + }) + } +} + +// Helper Functions + +// Helper function to create fake Cluster objects based on the MCS spec +func createClustersFromMCS(mcs *networkingv1alpha1.MultiClusterService) []*clusterv1alpha1.Cluster { + var clusters []*clusterv1alpha1.Cluster + for _, pc := range mcs.Spec.ProviderClusters { + clusters = append(clusters, &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: pc.Name}, + }) + } + for _, cc := range mcs.Spec.ConsumerClusters { + clusters = append(clusters, &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: cc.Name}, + }) + } + return clusters +} + +// Helper function to set up a scheme with all necessary types +func setupScheme() *runtime.Scheme { + s := runtime.NewScheme() + _ = corev1.AddToScheme(s) + _ = networkingv1alpha1.Install(s) + _ = workv1alpha1.Install(s) + _ = workv1alpha2.Install(s) + _ = clusterv1alpha1.Install(s) + _ = scheme.AddToScheme(s) + return s +} + +// Helper function to create a new MCSController with a fake client +func newFakeController(objs ...runtime.Object) *MCSController { + s := setupScheme() + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objs...).Build() + return &MCSController{ + Client: fakeClient, + EventRecorder: record.NewFakeRecorder(100), + } +} + +// Helper function to convert a slice of objects to a slice of runtime.Object +func toRuntimeObjects(objs interface{}) []runtime.Object { + var result []runtime.Object + switch v := objs.(type) { + case []*workv1alpha1.Work: + for _, obj := range v { + result = append(result, obj) + } + case []*clusterv1alpha1.Cluster: + for _, obj := range v { + result = append(result, obj) + } + case []*networkingv1alpha1.MultiClusterService: + for _, obj := range v { + result = append(result, obj) + } + } + return result +} + +// Helper function to create a fake client that can simulate update errors +func newFakeClientWithUpdateError(svc *corev1.Service, shouldError bool) client.Client { + s := runtime.NewScheme() + _ = corev1.AddToScheme(s) + _ = networkingv1alpha1.Install(s) + + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svc).Build() + + if shouldError { + return &errorInjectingClient{ + Client: fakeClient, + shouldError: shouldError, + } + } + + return fakeClient +} + +type errorInjectingClient struct { + client.Client + shouldError bool +} + +func (c *errorInjectingClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + if c.shouldError { + return fmt.Errorf("simulated update error") + } + return c.Client.Update(ctx, obj, opts...) 
+} diff --git a/pkg/controllers/remediation/remedy_controller_test.go b/pkg/controllers/remediation/remedy_controller_test.go new file mode 100644 index 000000000000..159e51483bac --- /dev/null +++ b/pkg/controllers/remediation/remedy_controller_test.go @@ -0,0 +1,207 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remediation + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + remedyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1" +) + +func TestReconcile(t *testing.T) { + tests := []struct { + name string + existingObjs []runtime.Object + inputCluster *clusterv1alpha1.Cluster + expectedError bool + expectedActions []string + expectedRequeue bool + }{ + { + name: "Cluster not found", + existingObjs: []runtime.Object{}, + inputCluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "non-existent-cluster", + }, + }, + expectedError: false, + expectedActions: nil, + expectedRequeue: false, + }, + { + name: "Cluster with no matching remedies", + existingObjs: []runtime.Object{ + &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + }, + inputCluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + expectedError: false, + expectedActions: nil, + expectedRequeue: false, + }, + { + name: "Cluster being deleted", + existingObjs: []runtime.Object{ + &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deleting-cluster", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{"test-finalizer"}, + }, + }, + }, + inputCluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deleting-cluster", + }, + }, + expectedError: false, + expectedActions: nil, + expectedRequeue: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupScheme() + fakeClient := createFakeClient(scheme, tt.existingObjs...) 
+ controller := &RemedyController{Client: fakeClient} + + req := reconcile.Request{ + NamespacedName: types.NamespacedName{Name: tt.inputCluster.Name}, + } + result, err := controller.Reconcile(context.Background(), req) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + // Assert requeue result + assert.Equal(t, tt.expectedRequeue, result.Requeue, "Unexpected requeue result") + + // Assert remedy actions if expected + if tt.expectedActions != nil { + updatedCluster := &clusterv1alpha1.Cluster{} + err := fakeClient.Get(context.Background(), req.NamespacedName, updatedCluster) + assert.NoError(t, err, "Failed to get updated cluster") + assert.Equal(t, tt.expectedActions, updatedCluster.Status.RemedyActions, "Unexpected remedy actions") + } + }) + } +} + +func TestSetupWithManager(t *testing.T) { + scheme := setupScheme() + tests := []struct { + name string + controllerSetup func() *RemedyController + }{ + { + name: "setup with valid client", + controllerSetup: func() *RemedyController { + return &RemedyController{Client: createFakeClient(scheme)} + }, + }, + { + name: "setup with nil client", + controllerSetup: func() *RemedyController { + return &RemedyController{Client: nil} + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mgr, err := setupManager(scheme) + require.NoError(t, err) + + controller := tt.controllerSetup() + err = controller.SetupWithManager(mgr) + + assert.NoError(t, err) + }) + } +} + +func TestSetupWatches(t *testing.T) { + scheme := setupScheme() + + mgr, err := setupManager(scheme) + require.NoError(t, err) + + controller := &RemedyController{Client: mgr.GetClient()} + + remedyController, err := controllerruntime.NewControllerManagedBy(mgr). + For(&clusterv1alpha1.Cluster{}). 
+ Build(controller) + require.NoError(t, err) + + err = controller.setupWatches(remedyController, mgr) + assert.NoError(t, err) +} + +// Helper Functions + +// Helper function to set up the scheme +func setupScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = clusterv1alpha1.Install(scheme) + _ = remedyv1alpha1.Install(scheme) + return scheme +} + +// Helper function to create a fake client +func createFakeClient(scheme *runtime.Scheme, objs ...runtime.Object) client.Client { + return fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objs...).Build() +} + +// Helper function to set up the manager +func setupManager(scheme *runtime.Scheme) (controllerruntime.Manager, error) { + return controllerruntime.NewManager(&rest.Config{}, controllerruntime.Options{Scheme: scheme}) +} diff --git a/pkg/controllers/status/cluster_status_controller.go b/pkg/controllers/status/cluster_status_controller.go index 5fd14445ceb5..5eb0844623e5 100644 --- a/pkg/controllers/status/cluster_status_controller.go +++ b/pkg/controllers/status/cluster_status_controller.go @@ -451,9 +451,9 @@ func getClusterHealthStatus(clusterClient *util.ClusterClient) (online, healthy return true, true } -func healthEndpointCheck(client *clientset.Clientset, path string) (int, error) { +func healthEndpointCheck(client clientset.Interface, path string) (int, error) { var healthStatus int - resp := client.DiscoveryClient.RESTClient().Get().AbsPath(path).Do(context.TODO()).StatusCode(&healthStatus) + resp := client.Discovery().RESTClient().Get().AbsPath(path).Do(context.TODO()).StatusCode(&healthStatus) return healthStatus, resp.Error() } diff --git a/pkg/controllers/status/work_status_controller.go b/pkg/controllers/status/work_status_controller.go index 90a21237ead9..591e962cbc0f 100644 --- a/pkg/controllers/status/work_status_controller.go +++ b/pkg/controllers/status/work_status_controller.go @@ -38,6 +38,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" "github.com/karmada-io/karmada/pkg/events" @@ -365,20 +366,7 @@ func (c *WorkStatusController) reflectStatus(ctx context.Context, work *workv1al } c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonReflectStatusSucceed, "Reflect status for object(%s/%s/%s) succeed.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName()) - var resourceHealth workv1alpha1.ResourceHealth - // When an unregistered resource kind is requested with the ResourceInterpreter, - // the interpreter will return an error, we treat its health status as Unknown. 
- healthy, err := c.ResourceInterpreter.InterpretHealth(clusterObj)
- if err != nil {
- resourceHealth = workv1alpha1.ResourceUnknown
- c.EventRecorder.Eventf(work, corev1.EventTypeWarning, events.EventReasonInterpretHealthFailed, "Interpret health of object(%s/%s/%s) failed, err: %s.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName(), err.Error())
- } else if healthy {
- resourceHealth = workv1alpha1.ResourceHealthy
- c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonInterpretHealthSucceed, "Interpret health of object(%s/%s/%s) as healthy.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName())
- } else {
- resourceHealth = workv1alpha1.ResourceUnhealthy
- c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonInterpretHealthSucceed, "Interpret health of object(%s/%s/%s) as unhealthy.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName())
- }
+ resourceHealth := c.interpretHealth(clusterObj, work)
 identifier, err := c.buildStatusIdentifier(work, clusterObj)
 if err != nil {
@@ -400,6 +388,28 @@ func (c *WorkStatusController) reflectStatus(ctx context.Context, work *workv1al
 })
 }
+func (c *WorkStatusController) interpretHealth(clusterObj *unstructured.Unstructured, work *workv1alpha1.Work) workv1alpha1.ResourceHealth {
+ // For kinds that don't have a health check, we treat them as healthy.
+ if !c.ResourceInterpreter.HookEnabled(clusterObj.GroupVersionKind(), configv1alpha1.InterpreterOperationInterpretHealth) {
+ klog.V(5).Infof("Skipping health assessment for object %v %s/%s: no health interpretation customization is registered, treating it as healthy.", clusterObj.GroupVersionKind(), clusterObj.GetNamespace(), clusterObj.GetName())
+ return workv1alpha1.ResourceHealthy
+ }
+
+ var resourceHealth workv1alpha1.ResourceHealth
+ healthy, err := c.ResourceInterpreter.InterpretHealth(clusterObj)
+ if err != nil {
+ resourceHealth = workv1alpha1.ResourceUnknown
+ c.EventRecorder.Eventf(work, corev1.EventTypeWarning, events.EventReasonInterpretHealthFailed, "Interpret health of object(%s/%s/%s) failed, err: %s.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName(), err.Error())
+ } else if healthy {
+ resourceHealth = workv1alpha1.ResourceHealthy
+ c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonInterpretHealthSucceed, "Interpret health of object(%s/%s/%s) as healthy.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName())
+ } else {
+ resourceHealth = workv1alpha1.ResourceUnhealthy
+ c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonInterpretHealthSucceed, "Interpret health of object(%s/%s/%s) as unhealthy.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName())
+ }
+ return resourceHealth
+}
+
 func (c *WorkStatusController) buildStatusIdentifier(work *workv1alpha1.Work, clusterObj *unstructured.Unstructured) (*workv1alpha1.ResourceIdentifier, error) {
 manifestRef := helper.ManifestReference{APIVersion: clusterObj.GetAPIVersion(), Kind: clusterObj.GetKind(), Namespace: clusterObj.GetNamespace(), Name: clusterObj.GetName()}
diff --git a/pkg/controllers/status/work_status_controller_test.go b/pkg/controllers/status/work_status_controller_test.go
index 11563539d767..24d2f5f29d6b 100644
--- a/pkg/controllers/status/work_status_controller_test.go
+++ b/pkg/controllers/status/work_status_controller_test.go
@@ -25,22 +25,26 @@ import (
 "github.com/stretchr/testify/assert"
 corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/uuid" dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/events" "github.com/karmada-io/karmada/pkg/resourceinterpreter/default/native" "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag" "github.com/karmada-io/karmada/pkg/util" @@ -723,6 +727,9 @@ func newWorkStatusController(cluster *clusterv1alpha1.Cluster, dynamicClientSets m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace) return m }(), + ResourceInterpreter: FakeResourceInterpreter{ + DefaultInterpreter: native.NewDefaultInterpreter(), + }, } if len(dynamicClientSets) > 0 { @@ -1021,3 +1028,47 @@ func TestWorkStatusController_registerInformersAndStart(t *testing.T) { assert.NotEmpty(t, err) }) } + +func TestWorkStatusController_interpretHealth(t *testing.T) { + tests := []struct { + name string + clusterObj client.Object + expectedResourceHealth workv1alpha1.ResourceHealth + expectedEventReason string + }{ + { + name: "deployment without status is interpreted as unhealthy", + clusterObj: testhelper.NewDeployment("foo", "bar"), + expectedResourceHealth: workv1alpha1.ResourceUnhealthy, + expectedEventReason: events.EventReasonInterpretHealthSucceed, + }, + { + name: "cluster role without status is interpreted as healthy", + clusterObj: testhelper.NewClusterRole("foo", []rbacv1.PolicyRule{}), + expectedResourceHealth: workv1alpha1.ResourceHealthy, + }, + } + + cluster := newCluster("cluster", clusterv1alpha1.ClusterConditionReady, metav1.ConditionTrue) + c := newWorkStatusController(cluster) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + work := testhelper.NewWork(tt.clusterObj.GetName(), tt.clusterObj.GetNamespace(), string(uuid.NewUUID()), []byte{}) + obj, err := helper.ToUnstructured(tt.clusterObj) + assert.NoError(t, err) + + resourceHealth := c.interpretHealth(obj, work) + assert.Equalf(t, tt.expectedResourceHealth, resourceHealth, "expected resource health %v, got %v", tt.expectedResourceHealth, resourceHealth) + + eventRecorder := c.EventRecorder.(*record.FakeRecorder) + if tt.expectedEventReason == "" { + assert.Empty(t, eventRecorder.Events, "expected no events to get recorded") + } else { + assert.Equal(t, 1, len(eventRecorder.Events)) + e := <-eventRecorder.Events + assert.Containsf(t, e, tt.expectedEventReason, "expected event reason %v, got %v", tt.expectedEventReason, e) + } + }) + } +} diff --git a/pkg/controllers/unifiedauth/unified_auth_controller_test.go b/pkg/controllers/unifiedauth/unified_auth_controller_test.go index 7122af35c8ca..8b47dfd35ba0 100644 --- a/pkg/controllers/unifiedauth/unified_auth_controller_test.go +++ b/pkg/controllers/unifiedauth/unified_auth_controller_test.go @@ -18,18 +18,23 @@ package unifiedauth import ( "context" + "fmt" "reflect" "testing" + "github.com/stretchr/testify/assert" rbacv1 
"k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" "github.com/karmada-io/karmada/pkg/util" "github.com/karmada-io/karmada/pkg/util/gclient" ) @@ -122,25 +127,16 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { cluster2 := &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "member2"}} cluster3 := &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "member3"}} - type fields struct { - Client client.Client - EventRecorder record.EventRecorder - } type args struct { clusterRole *rbacv1.ClusterRole } tests := []struct { - name string - fields fields - args args - want []reconcile.Request + name string + args args + want []reconcile.Request }{ { name: "specify resource names of cluster.karmada.io with cluster/proxy resource", - fields: fields{ - Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), - EventRecorder: record.NewFakeRecorder(1024), - }, args: args{ clusterRole: &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "cluster-proxy"}, @@ -158,10 +154,6 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { }, { name: "specify cluster.karmada.io with cluster/proxy resource", - fields: fields{ - Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), - EventRecorder: record.NewFakeRecorder(1024), - }, args: args{ clusterRole: &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "cluster-proxy"}, @@ -179,10 +171,6 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { }, { name: "specify cluster.karmada.io with wildcard resource", - fields: fields{ - Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), - EventRecorder: record.NewFakeRecorder(1024), - }, args: args{ clusterRole: &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "cluster-proxy"}, @@ -200,10 +188,6 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { }, { name: "specify search.karmada.io with proxying/proxy resource", - fields: fields{ - Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), - EventRecorder: record.NewFakeRecorder(1024), - }, args: args{ clusterRole: &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "search-proxy"}, @@ -221,10 +205,6 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { }, { name: "specify search.karmada.io with wildcard resource", - fields: fields{ - Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), - EventRecorder: record.NewFakeRecorder(1024), - }, args: args{ clusterRole: &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "search-proxy"}, @@ -242,10 +222,6 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { }, { name: "specify wildcard apiGroups with wildcard resource", - fields: fields{ - Client: 
fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), - EventRecorder: record.NewFakeRecorder(1024), - }, args: args{ clusterRole: &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "wildcard"}, @@ -265,8 +241,8 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := &Controller{ - Client: tt.fields.Client, - EventRecorder: tt.fields.EventRecorder, + Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), + EventRecorder: record.NewFakeRecorder(1024), } if got := c.generateRequestsFromClusterRole(context.Background(), tt.args.clusterRole); !reflect.DeepEqual(got, tt.want) { t.Errorf("generateRequestsFromClusterRole() = %v, want %v", got, tt.want) @@ -274,3 +250,455 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { }) } } + +func TestController_buildImpersonationClusterRole(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1alpha1.Cluster + rules []rbacv1.PolicyRule + expectedWorks int + }{ + { + name: "successful creation", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{SyncMode: clusterv1alpha1.Push}, + }, + rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"impersonate"}, + APIGroups: []string{""}, + Resources: []string{"users", "groups", "serviceaccounts"}, + }, + }, + expectedWorks: 1, + }, + { + name: "cluster with no sync mode", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "no-sync-cluster"}, + }, + rules: []rbacv1.PolicyRule{}, + expectedWorks: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := setupTestScheme() + + c := &Controller{ + Client: fake.NewClientBuilder().WithScheme(s).WithObjects(tt.cluster).Build(), + EventRecorder: record.NewFakeRecorder(1024), + } + + err := c.buildImpersonationClusterRole(context.Background(), tt.cluster, tt.rules) + assert.NoError(t, err) + + var createdWorks workv1alpha1.WorkList + err = c.Client.List(context.Background(), &createdWorks, &client.ListOptions{ + Namespace: generateExecutionSpaceName(tt.cluster.Name), + }) + assert.NoError(t, err) + assert.Equal(t, tt.expectedWorks, len(createdWorks.Items)) + }) + } +} + +func TestController_buildImpersonationClusterRoleBinding(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1alpha1.Cluster + expectedWorks int + expectPanic bool + }{ + { + name: "successful creation", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Push, + ImpersonatorSecretRef: &clusterv1alpha1.LocalSecretReference{ + Namespace: "karmada-system", + Name: "test-secret", + }, + }, + }, + expectedWorks: 1, + expectPanic: false, + }, + { + name: "cluster with no impersonator secret", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "no-secret-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{SyncMode: clusterv1alpha1.Push}, + }, + expectedWorks: 0, + expectPanic: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := setupTestScheme() + + c := &Controller{ + Client: fake.NewClientBuilder().WithScheme(s).WithObjects(tt.cluster).Build(), + EventRecorder: record.NewFakeRecorder(1024), + } + + if tt.expectPanic { + assert.Panics(t, func() { + _ = 
c.buildImpersonationClusterRoleBinding(context.Background(), tt.cluster) + }, "Expected buildImpersonationClusterRoleBinding to panic, but it didn't") + } else { + err := c.buildImpersonationClusterRoleBinding(context.Background(), tt.cluster) + assert.NoError(t, err) + + var createdWorks workv1alpha1.WorkList + err = c.Client.List(context.Background(), &createdWorks, &client.ListOptions{ + Namespace: generateExecutionSpaceName(tt.cluster.Name), + }) + assert.NoError(t, err) + assert.Equal(t, tt.expectedWorks, len(createdWorks.Items)) + } + }) + } +} + +func TestController_Reconcile(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1alpha1.Cluster + expectedResult reconcile.Result + expectedErrMsg string + expectedEvents []string + }{ + { + name: "successful reconciliation", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Push, + ImpersonatorSecretRef: &clusterv1alpha1.LocalSecretReference{ + Namespace: "karmada-system", + Name: "test-secret", + }, + }, + }, + expectedResult: reconcile.Result{}, + expectedEvents: []string{"Normal SyncImpersonationConfigSucceed Sync impersonation config succeed."}, + }, + { + name: "cluster not found", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "non-existent-cluster"}, + }, + expectedResult: reconcile.Result{}, + }, + { + name: "cluster being deleted", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deleting-cluster", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{"test-finalizer"}, + }, + }, + expectedResult: reconcile.Result{}, + }, + { + name: "cluster without impersonator secret", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "no-secret-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Push, + }, + }, + expectedResult: reconcile.Result{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := setupTestScheme() + + fakeRecorder := record.NewFakeRecorder(10) + c := &Controller{ + Client: fake.NewClientBuilder().WithScheme(s).WithObjects(tt.cluster).Build(), + EventRecorder: fakeRecorder, + } + + result, err := c.Reconcile(context.Background(), reconcile.Request{ + NamespacedName: types.NamespacedName{Name: tt.cluster.Name}, + }) + + assert.Equal(t, tt.expectedResult, result) + + if tt.expectedErrMsg != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedErrMsg) + } else { + assert.NoError(t, err) + } + + close(fakeRecorder.Events) + var actualEvents []string + for event := range fakeRecorder.Events { + actualEvents = append(actualEvents, event) + } + assert.Equal(t, tt.expectedEvents, actualEvents) + }) + } +} + +func TestController_syncImpersonationConfig(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1alpha1.Cluster + existingRBAC []client.Object + expectedErrMsg string + expectedWorks int + }{ + { + name: "successful sync", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Push, + ImpersonatorSecretRef: &clusterv1alpha1.LocalSecretReference{ + Namespace: "karmada-system", + Name: "test-secret", + }, + }, + }, + existingRBAC: []client.Object{ + &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "test-role"}, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"*"}, + APIGroups: 
[]string{"cluster.karmada.io"}, + Resources: []string{"clusters/proxy"}, + ResourceNames: []string{"test-cluster"}, + }, + }, + }, + &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: "test-binding"}, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: "test-role", + }, + Subjects: []rbacv1.Subject{ + {Kind: "User", Name: "test-user"}, + }, + }, + }, + expectedWorks: 2, // One for ClusterRole, one for ClusterRoleBinding + }, + { + name: "no matching RBAC", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Push, + ImpersonatorSecretRef: &clusterv1alpha1.LocalSecretReference{ + Namespace: "karmada-system", + Name: "test-secret", + }, + }, + }, + expectedWorks: 2, // One for ClusterRole, one for ClusterRoleBinding + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := setupTestScheme() + + builder := fake.NewClientBuilder().WithScheme(s).WithObjects(tt.cluster) + for _, obj := range tt.existingRBAC { + builder = builder.WithObjects(obj) + } + + c := &Controller{ + Client: builder.Build(), + EventRecorder: record.NewFakeRecorder(10), + } + + err := c.syncImpersonationConfig(context.Background(), tt.cluster) + + if tt.expectedErrMsg != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedErrMsg) + } else { + assert.NoError(t, err) + + var createdWorks workv1alpha1.WorkList + err = c.Client.List(context.Background(), &createdWorks, &client.ListOptions{ + Namespace: generateExecutionSpaceName(tt.cluster.Name), + }) + assert.NoError(t, err) + assert.Equal(t, tt.expectedWorks, len(createdWorks.Items)) + } + }) + } +} + +func TestController_newClusterRoleMapFunc(t *testing.T) { + tests := []struct { + name string + clusterRole *rbacv1.ClusterRole + expectedLength int + }{ + { + name: "ClusterRole with matching cluster.karmada.io rule", + clusterRole: &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "test-role"}, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"cluster.karmada.io"}, + Resources: []string{"clusters/proxy"}, + ResourceNames: []string{"cluster1", "cluster2"}, + }, + }, + }, + expectedLength: 2, + }, + { + name: "ClusterRole with matching search.karmada.io rule", + clusterRole: &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "search-role"}, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"search.karmada.io"}, + Resources: []string{"proxying/proxy"}, + }, + }, + }, + expectedLength: 1, // 1 because we're not actually listing clusters in this test + }, + { + name: "ClusterRole without matching rules", + clusterRole: &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "non-matching-role"}, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + }, + }, + }, + expectedLength: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupTestScheme() + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(&clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}}). 
+ Build() + + c := &Controller{ + Client: fakeClient, + EventRecorder: record.NewFakeRecorder(1024), + } + + mapFunc := c.newClusterRoleMapFunc() + result := mapFunc(context.Background(), tt.clusterRole) + assert.Len(t, result, tt.expectedLength) + }) + } +} + +func TestController_newClusterRoleBindingMapFunc(t *testing.T) { + tests := []struct { + name string + clusterRoleBinding *rbacv1.ClusterRoleBinding + clusterRole *rbacv1.ClusterRole + expectedLength int + }{ + { + name: "ClusterRoleBinding with matching ClusterRole", + clusterRoleBinding: &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: "test-binding"}, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: "test-role", + }, + }, + clusterRole: &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "test-role"}, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"cluster.karmada.io"}, + Resources: []string{"clusters/proxy"}, + ResourceNames: []string{"cluster1", "cluster2"}, + }, + }, + }, + expectedLength: 2, + }, + { + name: "ClusterRoleBinding with non-matching ClusterRole", + clusterRoleBinding: &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: "non-matching-binding"}, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: "non-matching-role", + }, + }, + clusterRole: &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "non-matching-role"}, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + }, + }, + }, + expectedLength: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupTestScheme() + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(tt.clusterRole, &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}}). 
+ Build() + + c := &Controller{ + Client: fakeClient, + EventRecorder: record.NewFakeRecorder(1024), + } + + mapFunc := c.newClusterRoleBindingMapFunc() + result := mapFunc(context.Background(), tt.clusterRoleBinding) + assert.Len(t, result, tt.expectedLength) + }) + } +} + +// Helper Functions + +// generateExecutionSpaceName is a helper function to generate the execution space name +func generateExecutionSpaceName(clusterName string) string { + return fmt.Sprintf("karmada-es-%s", clusterName) +} + +// Helper function to setup scheme +func setupTestScheme() *runtime.Scheme { + s := runtime.NewScheme() + _ = clusterv1alpha1.Install(s) + _ = workv1alpha1.Install(s) + _ = rbacv1.AddToScheme(s) + return s +} diff --git a/pkg/controllers/workloadrebalancer/workloadrebalancer_controller_test.go b/pkg/controllers/workloadrebalancer/workloadrebalancer_controller_test.go index 8a458bffea89..46d89b701613 100644 --- a/pkg/controllers/workloadrebalancer/workloadrebalancer_controller_test.go +++ b/pkg/controllers/workloadrebalancer/workloadrebalancer_controller_test.go @@ -18,11 +18,15 @@ package workloadrebalancer import ( "context" + "crypto/rand" + "fmt" + "math/big" "reflect" "testing" "time" appsv1 "k8s.io/api/apps/v1" + rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -42,27 +46,27 @@ var ( now = metav1.Now() oneHourAgo = metav1.NewTime(time.Now().Add(-1 * time.Hour)) - deploy1 = helper.NewDeployment("test-ns", "test-1") + deploy1 = helper.NewDeployment("test-ns", fmt.Sprintf("test-1-%s", randomSuffix())) binding1 = newResourceBinding(deploy1) deploy1Obj = newObjectReference(deploy1) // use deploy2 to mock a resource whose resource-binding not found. - deploy2 = helper.NewDeployment("test-ns", "test-2") + deploy2 = helper.NewDeployment("test-ns", fmt.Sprintf("test-2-%s", randomSuffix())) deploy2Obj = newObjectReference(deploy2) - deploy3 = helper.NewDeployment("test-ns", "test-3") + deploy3 = helper.NewDeployment("test-ns", fmt.Sprintf("test-3-%s", randomSuffix())) binding3 = newResourceBinding(deploy3) deploy3Obj = newObjectReference(deploy3) pendingRebalancer = &appsv1alpha1.WorkloadRebalancer{ - ObjectMeta: metav1.ObjectMeta{Name: "rebalancer-with-pending-workloads", CreationTimestamp: now}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("rebalancer-with-pending-workloads-%s", randomSuffix()), CreationTimestamp: now}, Spec: appsv1alpha1.WorkloadRebalancerSpec{ // Put deploy2Obj before deploy1Obj to test whether the results of status are sorted. 
Workloads: []appsv1alpha1.ObjectReference{deploy2Obj, deploy1Obj}, }, } succeedRebalancer = &appsv1alpha1.WorkloadRebalancer{ - ObjectMeta: metav1.ObjectMeta{Name: "rebalancer-with-succeed-workloads", CreationTimestamp: oneHourAgo}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("rebalancer-with-succeed-workloads-%s", randomSuffix()), CreationTimestamp: oneHourAgo}, Spec: appsv1alpha1.WorkloadRebalancerSpec{ Workloads: []appsv1alpha1.ObjectReference{deploy1Obj}, }, @@ -76,7 +80,7 @@ var ( }, } notFoundRebalancer = &appsv1alpha1.WorkloadRebalancer{ - ObjectMeta: metav1.ObjectMeta{Name: "rebalancer-with-workloads-whose-binding-not-found", CreationTimestamp: now}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("rebalancer-with-workloads-whose-binding-not-found-%s", randomSuffix()), CreationTimestamp: now}, Spec: appsv1alpha1.WorkloadRebalancerSpec{ Workloads: []appsv1alpha1.ObjectReference{deploy2Obj}, }, @@ -91,7 +95,7 @@ var ( }, } failedRebalancer = &appsv1alpha1.WorkloadRebalancer{ - ObjectMeta: metav1.ObjectMeta{Name: "rebalancer-with-failed-workloads", CreationTimestamp: now}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("rebalancer-with-failed-workloads-%s", randomSuffix()), CreationTimestamp: now}, Spec: appsv1alpha1.WorkloadRebalancerSpec{ Workloads: []appsv1alpha1.ObjectReference{deploy1Obj}, }, @@ -105,7 +109,7 @@ var ( }, } modifiedRebalancer = &appsv1alpha1.WorkloadRebalancer{ - ObjectMeta: metav1.ObjectMeta{Name: "rebalancer-which-experienced-modification", CreationTimestamp: oneHourAgo}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("rebalancer-which-experienced-modification-%s", randomSuffix()), CreationTimestamp: oneHourAgo}, Spec: appsv1alpha1.WorkloadRebalancerSpec{ Workloads: []appsv1alpha1.ObjectReference{deploy3Obj}, }, @@ -124,7 +128,7 @@ var ( }, } ttlFinishedRebalancer = &appsv1alpha1.WorkloadRebalancer{ - ObjectMeta: metav1.ObjectMeta{Name: "ttl-finished-rebalancer", CreationTimestamp: oneHourAgo}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("ttl-finished-rebalancer-%s", randomSuffix()), CreationTimestamp: oneHourAgo}, Spec: appsv1alpha1.WorkloadRebalancerSpec{ TTLSecondsAfterFinished: ptr.To[int32](5), Workloads: []appsv1alpha1.ObjectReference{deploy1Obj}, @@ -139,6 +143,19 @@ var ( }, }, } + + clusterRole = &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("test-cluster-role-%s", randomSuffix())}, + } + clusterBinding = newClusterResourceBinding(clusterRole) + clusterRoleObj = newClusterRoleObjectReference(clusterRole) + + clusterRebalancer = &appsv1alpha1.WorkloadRebalancer{ + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("cluster-rebalancer-%s", randomSuffix()), CreationTimestamp: now}, + Spec: appsv1alpha1.WorkloadRebalancerSpec{ + Workloads: []appsv1alpha1.ObjectReference{clusterRoleObj}, + }, + } ) func TestRebalancerController_Reconcile(t *testing.T) { @@ -250,51 +267,121 @@ func TestRebalancerController_Reconcile(t *testing.T) { existObjsWithStatus: []client.Object{ttlFinishedRebalancer}, needsCleanup: true, }, + { + name: "reconcile cluster-wide resource rebalancer", + req: controllerruntime.Request{ + NamespacedName: types.NamespacedName{Name: clusterRebalancer.Name}, + }, + existObjects: []client.Object{clusterRole, clusterBinding, clusterRebalancer}, + existObjsWithStatus: []client.Object{clusterRebalancer}, + wantStatus: appsv1alpha1.WorkloadRebalancerStatus{ + ObservedWorkloads: []appsv1alpha1.ObservedWorkload{ + { + Workload: clusterRoleObj, + Result: appsv1alpha1.RebalanceSuccessful, + }, + }, + }, + }, } 
for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := &RebalancerController{ - Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()). - WithObjects(tt.existObjects...). - WithStatusSubresource(tt.existObjsWithStatus...).Build(), - } - _, err := c.Reconcile(context.TODO(), tt.req) - // 1. check whether it has error - if (err == nil && tt.wantErr) || (err != nil && !tt.wantErr) { - t.Fatalf("Reconcile() error = %v, wantErr %v", err, tt.wantErr) - } - // 2. check final WorkloadRebalancer status - rebalancerGet := &appsv1alpha1.WorkloadRebalancer{} - if err := c.Client.Get(context.TODO(), tt.req.NamespacedName, rebalancerGet); err != nil { - if apierrors.IsNotFound(err) && tt.needsCleanup { - t.Logf("WorkloadRebalancer %s has be cleaned up as expected", tt.req.NamespacedName) - return - } - t.Fatalf("get WorkloadRebalancer failed: %+v", err) - } - // we can't predict `FinishTime` in `wantStatus`, so not compare this field. - tt.wantStatus.FinishTime = rebalancerGet.Status.FinishTime - if !reflect.DeepEqual(rebalancerGet.Status, tt.wantStatus) { - t.Fatalf("update WorkloadRebalancer failed, got: %+v, want: %+v", rebalancerGet.Status, tt.wantStatus) - } - // 3. check binding's rescheduleTriggeredAt - for _, item := range rebalancerGet.Status.ObservedWorkloads { - if item.Result != appsv1alpha1.RebalanceSuccessful { - continue - } - bindingGet := &workv1alpha2.ResourceBinding{} - bindingName := names.GenerateBindingName(item.Workload.Kind, item.Workload.Name) - if err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: item.Workload.Namespace, Name: bindingName}, bindingGet); err != nil { - t.Fatalf("get bindding (%s) failed: %+v", bindingName, err) - } - if !bindingGet.Spec.RescheduleTriggeredAt.Equal(&rebalancerGet.CreationTimestamp) { - t.Fatalf("rescheduleTriggeredAt of binding got: %+v, want: %+v", bindingGet.Spec.RescheduleTriggeredAt, rebalancerGet.CreationTimestamp) - } - } + runRebalancerTest(t, tt) }) } } +func runRebalancerTest(t *testing.T, tt struct { + name string + req controllerruntime.Request + existObjects []client.Object + existObjsWithStatus []client.Object + wantErr bool + wantStatus appsv1alpha1.WorkloadRebalancerStatus + needsCleanup bool +}) { + c := &RebalancerController{ + Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()). + WithObjects(tt.existObjects...). + WithStatusSubresource(tt.existObjsWithStatus...).Build(), + } + _, err := c.Reconcile(context.TODO(), tt.req) + // 1. check whether it has error + if (err != nil) != tt.wantErr { + t.Fatalf("Reconcile() error = %v, wantErr %v", err, tt.wantErr) + } + + // 2. check final WorkloadRebalancer status + rebalancerGet := &appsv1alpha1.WorkloadRebalancer{} + err = c.Client.Get(context.TODO(), tt.req.NamespacedName, rebalancerGet) + if err != nil { + if apierrors.IsNotFound(err) && tt.needsCleanup { + t.Logf("WorkloadRebalancer %s has been cleaned up as expected", tt.req.NamespacedName) + return + } + t.Fatalf("get WorkloadRebalancer failed: %+v", err) + } + + tt.wantStatus.FinishTime = rebalancerGet.Status.FinishTime + if rebalancerGet.Status.FinishTime == nil { + // If FinishTime is nil, set it to a non-nil value for comparison + now := metav1.Now() + tt.wantStatus.FinishTime = &now + rebalancerGet.Status.FinishTime = &now + } + if !reflect.DeepEqual(rebalancerGet.Status, tt.wantStatus) { + t.Fatalf("update WorkloadRebalancer failed, got: %+v, want: %+v", rebalancerGet.Status, tt.wantStatus) + } + + // 3. 
check binding's rescheduleTriggeredAt + checkBindings(t, c, rebalancerGet) +} + +func checkBindings(t *testing.T, c *RebalancerController, rebalancerGet *appsv1alpha1.WorkloadRebalancer) { + for _, item := range rebalancerGet.Status.ObservedWorkloads { + if item.Result != appsv1alpha1.RebalanceSuccessful { + continue + } + if item.Workload.Namespace == "" { + // This is a cluster-wide resource + checkClusterBinding(t, c, item, rebalancerGet) + } else { + // This is a namespace-scoped resource + checkResourceBinding(t, c, item, rebalancerGet) + } + } +} + +func checkClusterBinding(t *testing.T, c *RebalancerController, item appsv1alpha1.ObservedWorkload, rebalancerGet *appsv1alpha1.WorkloadRebalancer) { + clusterBindingGet := &workv1alpha2.ClusterResourceBinding{} + clusterBindingName := names.GenerateBindingName(item.Workload.Kind, item.Workload.Name) + err := c.Client.Get(context.TODO(), client.ObjectKey{Name: clusterBindingName}, clusterBindingGet) + if err != nil { + if !apierrors.IsNotFound(err) { + t.Fatalf("get cluster binding (%s) failed: %+v", clusterBindingName, err) + } + return // Skip the check if the binding is not found + } + if !clusterBindingGet.Spec.RescheduleTriggeredAt.Equal(&rebalancerGet.CreationTimestamp) { + t.Fatalf("rescheduleTriggeredAt of cluster binding got: %+v, want: %+v", clusterBindingGet.Spec.RescheduleTriggeredAt, rebalancerGet.CreationTimestamp) + } +} + +func checkResourceBinding(t *testing.T, c *RebalancerController, item appsv1alpha1.ObservedWorkload, rebalancerGet *appsv1alpha1.WorkloadRebalancer) { + bindingGet := &workv1alpha2.ResourceBinding{} + bindingName := names.GenerateBindingName(item.Workload.Kind, item.Workload.Name) + err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: item.Workload.Namespace, Name: bindingName}, bindingGet) + if err != nil { + if !apierrors.IsNotFound(err) { + t.Fatalf("get binding (%s) failed: %+v", bindingName, err) + } + return // Skip the check if the binding is not found + } + if !bindingGet.Spec.RescheduleTriggeredAt.Equal(&rebalancerGet.CreationTimestamp) { + t.Fatalf("rescheduleTriggeredAt of binding got: %+v, want: %+v", bindingGet.Spec.RescheduleTriggeredAt, rebalancerGet.CreationTimestamp) + } +} + func TestRebalancerController_updateWorkloadRebalancerStatus(t *testing.T) { tests := []struct { name string @@ -355,3 +442,31 @@ func newObjectReference(obj *appsv1.Deployment) appsv1alpha1.ObjectReference { Namespace: obj.Namespace, } } + +func newClusterResourceBinding(obj *rbacv1.ClusterRole) *workv1alpha2.ClusterResourceBinding { + return &workv1alpha2.ClusterResourceBinding{ + TypeMeta: metav1.TypeMeta{Kind: "ClusterResourceBinding", APIVersion: "work.karmada.io/v1alpha2"}, + ObjectMeta: metav1.ObjectMeta{Name: names.GenerateBindingName("ClusterRole", obj.Name)}, + Spec: workv1alpha2.ResourceBindingSpec{RescheduleTriggeredAt: &oneHourAgo}, + Status: workv1alpha2.ResourceBindingStatus{LastScheduledTime: &oneHourAgo}, + } +} + +func newClusterRoleObjectReference(obj *rbacv1.ClusterRole) appsv1alpha1.ObjectReference { + return appsv1alpha1.ObjectReference{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRole", + Name: obj.Name, + } +} + +// Helper function for generating random suffix +func randomSuffix() string { + max := big.NewInt(10000) + n, err := rand.Int(rand.Reader, max) + if err != nil { + // In a test setup, it's unlikely we'll hit this error + panic(fmt.Sprintf("failed to generate random number: %v", err)) + } + return fmt.Sprintf("%d", n) +} diff --git 
a/pkg/descheduler/core/filter_test.go b/pkg/descheduler/core/filter_test.go index 4f9c592168e2..a1e0b1ae3d35 100644 --- a/pkg/descheduler/core/filter_test.go +++ b/pkg/descheduler/core/filter_test.go @@ -58,6 +58,21 @@ func TestFilterBindings(t *testing.T) { }, expected: 2, }, + { + name: "Invalid placement annotation", + bindings: []*workv1alpha2.ResourceBinding{ + createBindingWithInvalidPlacementAnnotation("binding1", "apps/v1", "Deployment"), + }, + expected: 0, + }, + { + name: "Mix of valid and invalid annotations", + bindings: []*workv1alpha2.ResourceBinding{ + createBindingWithInvalidPlacementAnnotation("binding1", "apps/v1", "Deployment"), + createBinding("binding2", "apps/v1", "Deployment", createValidPlacement()), + }, + expected: 1, + }, } for _, tt := range tests { @@ -108,6 +123,30 @@ func TestValidateGVK(t *testing.T) { }, expected: false, }, + { + name: "Empty APIVersion", + reference: &workv1alpha2.ObjectReference{ + APIVersion: "", + Kind: "Deployment", + }, + expected: false, + }, + { + name: "Empty Kind", + reference: &workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "", + }, + expected: false, + }, + { + name: "Case-sensitive check", + reference: &workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "deployment", + }, + expected: false, + }, } for _, tt := range tests { @@ -151,6 +190,30 @@ func TestValidatePlacement(t *testing.T) { binding: createBinding("binding5", "apps/v1", "Deployment", createPlacement(policyv1alpha1.ReplicaSchedulingTypeDivided, policyv1alpha1.ReplicaDivisionPreferenceWeighted, &policyv1alpha1.ClusterPreferences{DynamicWeight: policyv1alpha1.DynamicWeightByAvailableReplicas})), expected: true, }, + { + name: "Invalid JSON in placement annotation", + binding: createBindingWithInvalidPlacementAnnotation("binding6", "apps/v1", "Deployment"), + expected: false, + }, + { + name: "Valid JSON but invalid placement structure", + binding: func() *workv1alpha2.ResourceBinding { + b := createBinding("binding7", "apps/v1", "Deployment", nil) + b.Annotations = map[string]string{util.PolicyPlacementAnnotation: `{"invalidField": "value"}`} + return b + }(), + expected: false, + }, + { + name: "Nil ReplicaScheduling", + binding: func() *workv1alpha2.ResourceBinding { + p := &policyv1alpha1.Placement{ + ReplicaScheduling: nil, + } + return createBinding("binding8", "apps/v1", "Deployment", p) + }(), + expected: false, + }, } for _, tt := range tests { @@ -163,6 +226,12 @@ func TestValidatePlacement(t *testing.T) { } } +func createBindingWithInvalidPlacementAnnotation(name, apiVersion, kind string) *workv1alpha2.ResourceBinding { + binding := createBinding(name, apiVersion, kind, nil) + binding.Annotations = map[string]string{util.PolicyPlacementAnnotation: "invalid json"} + return binding +} + func createBinding(name, apiVersion, kind string, placement *policyv1alpha1.Placement) *workv1alpha2.ResourceBinding { binding := &workv1alpha2.ResourceBinding{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/descheduler/core/helper_test.go b/pkg/descheduler/core/helper_test.go index a0f7fda5ee1e..154006bdaa84 100644 --- a/pkg/descheduler/core/helper_test.go +++ b/pkg/descheduler/core/helper_test.go @@ -18,6 +18,7 @@ package core import ( "context" + "fmt" "reflect" "testing" "time" @@ -30,143 +31,336 @@ import ( ) func TestNewSchedulingResultHelper(t *testing.T) { - binding := &workv1alpha2.ResourceBinding{ - Spec: workv1alpha2.ResourceBindingSpec{ - Clusters: []workv1alpha2.TargetCluster{ - {Name: "cluster1", Replicas: 3}, - {Name: "cluster2", 
Replicas: 2}, + tests := []struct { + name string + binding *workv1alpha2.ResourceBinding + expected *SchedulingResultHelper + }{ + { + name: "Valid binding with ready replicas", + binding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 3}, + {Name: "cluster2", Replicas: 2}, + }, + }, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "cluster1", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + }, + { + ClusterName: "cluster2", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 1}`)}, + }, + }, + }, + }, + expected: &SchedulingResultHelper{ + TargetClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + {ClusterName: "cluster2", Spec: 2, Ready: 1}, + }, }, }, - Status: workv1alpha2.ResourceBindingStatus{ - AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ - { - ClusterName: "cluster1", - Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + { + name: "Binding with invalid status", + binding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 3}, + {Name: "cluster2", Replicas: 2}, + }, }, - { - ClusterName: "cluster2", - Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 1}`)}, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "cluster1", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + }, + { + ClusterName: "cluster2", + Status: &runtime.RawExtension{Raw: []byte(`invalid json`)}, + }, + }, + }, + }, + expected: &SchedulingResultHelper{ + TargetClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + {ClusterName: "cluster2", Spec: 2, Ready: client.UnauthenticReplica}, }, }, }, } - helper := NewSchedulingResultHelper(binding) - - expected := &SchedulingResultHelper{ - ResourceBinding: binding, - TargetClusters: []*TargetClusterWrapper{ - {ClusterName: "cluster1", Spec: 3, Ready: 2}, - {ClusterName: "cluster2", Spec: 2, Ready: 1}, - }, - } - - if !reflect.DeepEqual(helper, expected) { - t.Errorf("NewSchedulingResultHelper() = %v, want %v", helper, expected) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + helper := NewSchedulingResultHelper(tt.binding) + helper.ResourceBinding = nil + if !reflect.DeepEqual(helper, tt.expected) { + t.Errorf("NewSchedulingResultHelper() = %v, want %v", helper, tt.expected) + } + }) } } -func TestSchedulingResultHelper_GetUndesiredClusters(t *testing.T) { - helper := &SchedulingResultHelper{ - TargetClusters: []*TargetClusterWrapper{ - {ClusterName: "cluster1", Spec: 3, Ready: 2}, - {ClusterName: "cluster2", Spec: 2, Ready: 2}, - {ClusterName: "cluster3", Spec: 4, Ready: 1}, +func TestSchedulingResultHelper_FillUnschedulableReplicas(t *testing.T) { + tests := []struct { + name string + helper *SchedulingResultHelper + mockEstimator *mockUnschedulableReplicaEstimator + expected []*TargetClusterWrapper + expectedErrLog string + }{ + { + name: "Successful fill", + helper: &SchedulingResultHelper{ + ResourceBinding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "default", + }, + }, + }, + TargetClusters: 
[]*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + {ClusterName: "cluster2", Spec: 2, Ready: 1}, + }, + }, + mockEstimator: &mockUnschedulableReplicaEstimator{}, + expected: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2, Unschedulable: 1}, + {ClusterName: "cluster2", Spec: 2, Ready: 1, Unschedulable: 1}, + }, + }, + { + name: "Estimator error", + helper: &SchedulingResultHelper{ + ResourceBinding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "default", + }, + }, + }, + TargetClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + }, + }, + mockEstimator: &mockUnschedulableReplicaEstimator{shouldError: true}, + expected: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2, Unschedulable: 0}, + }, + expectedErrLog: "Max cluster unschedulable replicas error: mock error", + }, + { + name: "UnauthenticReplica handling", + helper: &SchedulingResultHelper{ + ResourceBinding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "default", + }, + }, + }, + TargetClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + {ClusterName: "cluster2", Spec: 2, Ready: 1}, + }, + }, + mockEstimator: &mockUnschedulableReplicaEstimator{unauthenticCluster: "cluster2"}, + expected: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2, Unschedulable: 1}, + {ClusterName: "cluster2", Spec: 2, Ready: 1, Unschedulable: 0}, + }, }, } - clusters, names := helper.GetUndesiredClusters() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client.GetUnschedulableReplicaEstimators()["mock"] = tt.mockEstimator + defer delete(client.GetUnschedulableReplicaEstimators(), "mock") - expectedClusters := []*TargetClusterWrapper{ - {ClusterName: "cluster1", Spec: 3, Ready: 2}, - {ClusterName: "cluster3", Spec: 4, Ready: 1}, - } - expectedNames := []string{"cluster1", "cluster3"} + tt.helper.FillUnschedulableReplicas(time.Minute) - if !reflect.DeepEqual(clusters, expectedClusters) { - t.Errorf("GetUndesiredClusters() clusters = %v, want %v", clusters, expectedClusters) - } - if !reflect.DeepEqual(names, expectedNames) { - t.Errorf("GetUndesiredClusters() names = %v, want %v", names, expectedNames) + if !reflect.DeepEqual(tt.helper.TargetClusters, tt.expected) { + t.Errorf("FillUnschedulableReplicas() = %v, want %v", tt.helper.TargetClusters, tt.expected) + } + }) } } -func TestSchedulingResultHelper_FillUnschedulableReplicas(t *testing.T) { - mockEstimator := &mockUnschedulableReplicaEstimator{} - client.GetUnschedulableReplicaEstimators()["mock"] = mockEstimator - defer delete(client.GetUnschedulableReplicaEstimators(), "mock") - - helper := &SchedulingResultHelper{ - ResourceBinding: &workv1alpha2.ResourceBinding{ - Spec: workv1alpha2.ResourceBindingSpec{ - Resource: workv1alpha2.ObjectReference{ - APIVersion: "apps/v1", - Kind: "Deployment", - Name: "test-deployment", - Namespace: "default", +func TestSchedulingResultHelper_GetUndesiredClusters(t *testing.T) { + tests := []struct { + name string + helper *SchedulingResultHelper + expectedClusters []*TargetClusterWrapper + expectedNames []string + }{ + { + name: "Mixed desired and undesired clusters", + helper: 
&SchedulingResultHelper{ + TargetClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + {ClusterName: "cluster2", Spec: 2, Ready: 2}, + {ClusterName: "cluster3", Spec: 4, Ready: 1}, }, }, + expectedClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + {ClusterName: "cluster3", Spec: 4, Ready: 1}, + }, + expectedNames: []string{"cluster1", "cluster3"}, }, - TargetClusters: []*TargetClusterWrapper{ - {ClusterName: "cluster1", Spec: 3, Ready: 2}, - {ClusterName: "cluster2", Spec: 2, Ready: 1}, + { + name: "All clusters desired", + helper: &SchedulingResultHelper{ + TargetClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 2, Ready: 2}, + {ClusterName: "cluster2", Spec: 3, Ready: 3}, + }, + }, + expectedClusters: nil, + expectedNames: nil, }, } - helper.FillUnschedulableReplicas(time.Minute) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + clusters, names := tt.helper.GetUndesiredClusters() - expected := []*TargetClusterWrapper{ - {ClusterName: "cluster1", Spec: 3, Ready: 2, Unschedulable: 1}, - {ClusterName: "cluster2", Spec: 2, Ready: 1, Unschedulable: 1}, - } - - if !reflect.DeepEqual(helper.TargetClusters, expected) { - t.Errorf("FillUnschedulableReplicas() = %v, want %v", helper.TargetClusters, expected) + if !reflect.DeepEqual(clusters, tt.expectedClusters) { + t.Errorf("GetUndesiredClusters() clusters = %v, want %v", clusters, tt.expectedClusters) + } + if !reflect.DeepEqual(names, tt.expectedNames) { + t.Errorf("GetUndesiredClusters() names = %v, want %v", names, tt.expectedNames) + } + }) } } func TestGetReadyReplicas(t *testing.T) { - binding := &workv1alpha2.ResourceBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-binding", - Namespace: "default", + tests := []struct { + name string + binding *workv1alpha2.ResourceBinding + expected map[string]int32 + }{ + { + name: "Valid status with readyReplicas", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + }, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "cluster1", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + }, + { + ClusterName: "cluster2", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 3}`)}, + }, + }, + }, + }, + expected: map[string]int32{ + "cluster1": 2, + "cluster2": 3, + }, }, - Status: workv1alpha2.ResourceBindingStatus{ - AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ - { - ClusterName: "cluster1", - Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + { + name: "Status with missing readyReplicas field", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", }, - { - ClusterName: "cluster2", - Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 3}`)}, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "cluster1", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + }, + { + ClusterName: "cluster2", + Status: &runtime.RawExtension{Raw: []byte(`{"someOtherField": 3}`)}, + }, + }, + }, + }, + expected: map[string]int32{ + "cluster1": 2, + }, + }, + { + name: "Status with invalid JSON", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", }, - { - ClusterName: "cluster3", - Status: 
&runtime.RawExtension{Raw: []byte(`{"someOtherField": 1}`)}, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "cluster1", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + }, + { + ClusterName: "cluster2", + Status: &runtime.RawExtension{Raw: []byte(`invalid json`)}, + }, + }, }, }, + expected: map[string]int32{ + "cluster1": 2, + }, }, } - result := getReadyReplicas(binding) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getReadyReplicas(tt.binding) - expected := map[string]int32{ - "cluster1": 2, - "cluster2": 3, + if !reflect.DeepEqual(result, tt.expected) { + t.Errorf("getReadyReplicas() = %v, want %v", result, tt.expected) + } + }) } +} - if !reflect.DeepEqual(result, expected) { - t.Errorf("getReadyReplicas() = %v, want %v", result, expected) - } +// Mock implementation of UnschedulableReplicaEstimator +type mockUnschedulableReplicaEstimator struct { + shouldError bool + unauthenticCluster string } -// mockUnschedulableReplicaEstimator is a mock implementation of the UnschedulableReplicaEstimator interface -type mockUnschedulableReplicaEstimator struct{} +func (m *mockUnschedulableReplicaEstimator) GetUnschedulableReplicas(_ context.Context, clusters []string, _ *workv1alpha2.ObjectReference, _ time.Duration) ([]workv1alpha2.TargetCluster, error) { + if m.shouldError { + return nil, fmt.Errorf("mock error") + } -func (m *mockUnschedulableReplicaEstimator) GetUnschedulableReplicas(_ context.Context, _ []string, _ *workv1alpha2.ObjectReference, _ time.Duration) ([]workv1alpha2.TargetCluster, error) { - return []workv1alpha2.TargetCluster{ - {Name: "cluster1", Replicas: 1}, - {Name: "cluster2", Replicas: 1}, - }, nil + result := make([]workv1alpha2.TargetCluster, len(clusters)) + for i, cluster := range clusters { + replicas := int32(1) + if cluster == m.unauthenticCluster { + replicas = client.UnauthenticReplica + } + result[i] = workv1alpha2.TargetCluster{Name: cluster, Replicas: replicas} + } + return result, nil } diff --git a/pkg/descheduler/descheduler_test.go b/pkg/descheduler/descheduler_test.go index 9f77fda6279d..7ecbe8dfa461 100644 --- a/pkg/descheduler/descheduler_test.go +++ b/pkg/descheduler/descheduler_test.go @@ -18,6 +18,8 @@ package descheduler import ( "context" + "errors" + "fmt" "reflect" "testing" "time" @@ -25,8 +27,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "google.golang.org/grpc" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" @@ -39,10 +45,195 @@ import ( estimatorservice "github.com/karmada-io/karmada/pkg/estimator/service" fakekarmadaclient "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/fake" informerfactory "github.com/karmada-io/karmada/pkg/generated/informers/externalversions" + worklister "github.com/karmada-io/karmada/pkg/generated/listers/work/v1alpha2" "github.com/karmada-io/karmada/pkg/util" "github.com/karmada-io/karmada/pkg/util/helper" ) +func TestRecordDescheduleResultEventForResourceBinding(t *testing.T) { + tests := []struct { + name string + rb *workv1alpha2.ResourceBinding + message string + err error + expectedEvents []string + }{ + { + name: "Nil ResourceBinding", + 
rb: nil, + message: "Test message", + err: nil, + expectedEvents: []string{}, + }, + { + name: "Successful descheduling", + rb: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test-namespace", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + UID: types.UID("test-uid"), + }, + }, + }, + message: "Descheduling succeeded", + err: nil, + expectedEvents: []string{ + "Normal DescheduleBindingSucceed Descheduling succeeded", + "Normal DescheduleBindingSucceed Descheduling succeeded", + }, + }, + { + name: "Failed descheduling", + rb: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test-namespace", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + UID: types.UID("test-uid"), + }, + }, + }, + message: "Descheduling failed", + err: errors.New("descheduling error"), + expectedEvents: []string{ + "Warning DescheduleBindingFailed descheduling error", + "Warning DescheduleBindingFailed descheduling error", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + d := &Descheduler{ + eventRecorder: fakeRecorder, + } + + d.recordDescheduleResultEventForResourceBinding(tt.rb, tt.message, tt.err) + + close(fakeRecorder.Events) + actualEvents := []string{} + for event := range fakeRecorder.Events { + actualEvents = append(actualEvents, event) + } + + assert.Equal(t, tt.expectedEvents, actualEvents, "Recorded events do not match expected events") + }) + } +} + +func TestUpdateCluster(t *testing.T) { + tests := []struct { + name string + newObj interface{} + expectedAdd bool + }{ + { + name: "Valid cluster update", + newObj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + expectedAdd: true, + }, + { + name: "Invalid object type", + newObj: &corev1.Pod{}, + expectedAdd: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + d := &Descheduler{ + schedulerEstimatorWorker: mockWorker, + } + + if tt.expectedAdd { + mockWorker.On("Add", mock.AnythingOfType("string")).Return() + } + + d.updateCluster(nil, tt.newObj) + + if tt.expectedAdd { + mockWorker.AssertCalled(t, "Add", "test-cluster") + } else { + mockWorker.AssertNotCalled(t, "Add", mock.Anything) + } + }) + } +} + +func TestDeleteCluster(t *testing.T) { + tests := []struct { + name string + obj interface{} + expectedAdd bool + }{ + { + name: "Delete Cluster object", + obj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + expectedAdd: true, + }, + { + name: "Delete DeletedFinalStateUnknown object", + obj: cache.DeletedFinalStateUnknown{ + Obj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + }, + expectedAdd: true, + }, + { + name: "Invalid object type", + obj: &corev1.Pod{}, + expectedAdd: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + d := &Descheduler{ + schedulerEstimatorWorker: mockWorker, + } + + if tt.expectedAdd { + mockWorker.On("Add", mock.AnythingOfType("string")).Return() + } + + d.deleteCluster(tt.obj) + + 
if tt.expectedAdd { + mockWorker.AssertCalled(t, "Add", "test-cluster") + } else { + mockWorker.AssertNotCalled(t, "Add", mock.Anything) + } + }) + } +} + func buildBinding(name, ns string, target, status []workv1alpha2.TargetCluster) (*workv1alpha2.ResourceBinding, error) { bindingStatus := workv1alpha2.ResourceBindingStatus{} for _, cluster := range status { @@ -630,3 +821,132 @@ func TestDescheduler_worker(t *testing.T) { }) } } + +func TestDescheduler_workerErrors(t *testing.T) { + tests := []struct { + name string + key interface{} + setupMocks func(*Descheduler) + expectedError string + }{ + { + name: "Invalid key type", + key: 123, + setupMocks: func(_ *Descheduler) {}, + expectedError: "failed to deschedule as invalid key: 123", + }, + { + name: "Invalid resource key format", + key: "invalid/key/format", + setupMocks: func(_ *Descheduler) {}, + expectedError: "invalid resource key: invalid/key/format", + }, + { + name: "ResourceBinding not found", + key: "default/non-existent-binding", + setupMocks: func(d *Descheduler) { + d.bindingLister = &mockBindingLister{ + getErr: apierrors.NewNotFound(schema.GroupResource{Resource: "resourcebindings"}, "non-existent-binding"), + } + }, + expectedError: "", + }, + { + name: "Error getting ResourceBinding", + key: "default/error-binding", + setupMocks: func(d *Descheduler) { + d.bindingLister = &mockBindingLister{ + getErr: fmt.Errorf("internal error"), + } + }, + expectedError: "get ResourceBinding(default/error-binding) error: internal error", + }, + { + name: "ResourceBinding being deleted", + key: "default/deleted-binding", + setupMocks: func(d *Descheduler) { + d.bindingLister = &mockBindingLister{ + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deleted-binding", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + }, + } + }, + expectedError: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &Descheduler{} + tt.setupMocks(d) + + err := d.worker(tt.key) + + if tt.expectedError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tt.expectedError) + } + }) + } +} + +// Mock Implementations + +type mockAsyncWorker struct { + mock.Mock +} + +func (m *mockAsyncWorker) Add(item interface{}) { + m.Called(item) +} + +func (m *mockAsyncWorker) AddAfter(item interface{}, duration time.Duration) { + m.Called(item, duration) +} + +func (m *mockAsyncWorker) Run(_ int, _ <-chan struct{}) {} + +func (m *mockAsyncWorker) Enqueue(obj interface{}) { + m.Called(obj) +} + +func (m *mockAsyncWorker) EnqueueAfter(obj interface{}, duration time.Duration) { + m.Called(obj, duration) +} + +type mockBindingLister struct { + binding *workv1alpha2.ResourceBinding + getErr error +} + +func (m *mockBindingLister) List(_ labels.Selector) (ret []*workv1alpha2.ResourceBinding, err error) { + return nil, nil +} + +func (m *mockBindingLister) ResourceBindings(_ string) worklister.ResourceBindingNamespaceLister { + return &mockBindingNamespaceLister{ + binding: m.binding, + getErr: m.getErr, + } +} + +type mockBindingNamespaceLister struct { + binding *workv1alpha2.ResourceBinding + getErr error +} + +func (m *mockBindingNamespaceLister) List(_ labels.Selector) (ret []*workv1alpha2.ResourceBinding, err error) { + return nil, nil +} + +func (m *mockBindingNamespaceLister) Get(_ string) (*workv1alpha2.ResourceBinding, error) { + if m.getErr != nil { + return nil, m.getErr + } + return m.binding, nil +} diff --git a/pkg/detector/claim.go 
b/pkg/detector/claim.go new file mode 100644 index 000000000000..8e9efa1ffbc7 --- /dev/null +++ b/pkg/detector/claim.go @@ -0,0 +1,91 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package detector + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" +) + +var ( + propagationPolicyClaimLabels = []string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel, + } + propagationPolicyClaimAnnotations = []string{ + policyv1alpha1.PropagationPolicyNamespaceAnnotation, + policyv1alpha1.PropagationPolicyNameAnnotation, + } + clusterPropagationPolicyClaimLabels = []string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel, + } + clusterPropagationPolicyClaimAnnotations = []string{ + policyv1alpha1.ClusterPropagationPolicyAnnotation, + } +) + +// AddPPClaimMetadata adds PropagationPolicy claim metadata, such as labels and annotations +func AddPPClaimMetadata(obj metav1.Object, policyID string, policyMeta metav1.ObjectMeta) { + util.MergeLabel(obj, policyv1alpha1.PropagationPolicyPermanentIDLabel, policyID) + + objectAnnotations := obj.GetAnnotations() + if objectAnnotations == nil { + objectAnnotations = make(map[string]string) + } + objectAnnotations[policyv1alpha1.PropagationPolicyNamespaceAnnotation] = policyMeta.GetNamespace() + objectAnnotations[policyv1alpha1.PropagationPolicyNameAnnotation] = policyMeta.GetName() + obj.SetAnnotations(objectAnnotations) +} + +// AddCPPClaimMetadata adds ClusterPropagationPolicy claim metadata, such as labels and annotations +func AddCPPClaimMetadata(obj metav1.Object, policyID string, policyMeta metav1.ObjectMeta) { + util.MergeLabel(obj, policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel, policyID) + + objectAnnotations := obj.GetAnnotations() + if objectAnnotations == nil { + objectAnnotations = make(map[string]string) + } + objectAnnotations[policyv1alpha1.ClusterPropagationPolicyAnnotation] = policyMeta.GetName() + obj.SetAnnotations(objectAnnotations) +} + +// CleanupPPClaimMetadata removes PropagationPolicy claim metadata, such as labels and annotations +func CleanupPPClaimMetadata(obj metav1.Object) { + util.RemoveLabels(obj, propagationPolicyClaimLabels...) + util.RemoveAnnotations(obj, propagationPolicyClaimAnnotations...) +} + +// CleanupCPPClaimMetadata removes ClusterPropagationPolicy claim metadata, such as labels and annotations +func CleanupCPPClaimMetadata(obj metav1.Object) { + util.RemoveLabels(obj, clusterPropagationPolicyClaimLabels...) + util.RemoveAnnotations(obj, clusterPropagationPolicyClaimAnnotations...) +} + +// NeedCleanupClaimMetadata determines whether the object's claim metadata needs to be cleaned up. +// We need to ensure that the claim metadata being deleted belong to the current PropagationPolicy/ClusterPropagationPolicy, +// otherwise, there is a risk of mistakenly deleting the ones belonging to another PropagationPolicy/ClusterPropagationPolicy. 
+// This situation could occur during the rapid deletion and creation of PropagationPolicy(s)/ClusterPropagationPolicy(s). +// More info can refer to https://github.com/karmada-io/karmada/issues/5307. +func NeedCleanupClaimMetadata(obj metav1.Object, targetClaimMetadata map[string]string) bool { + for k, v := range targetClaimMetadata { + if obj.GetLabels()[k] != v { + return false + } + } + return true +} diff --git a/pkg/detector/claim_test.go b/pkg/detector/claim_test.go new file mode 100644 index 000000000000..5c2f906e4e7d --- /dev/null +++ b/pkg/detector/claim_test.go @@ -0,0 +1,210 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package detector + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" +) + +func TestAddPPClaimMetadata(t *testing.T) { + tests := []struct { + name string + policyID string + policyMeta metav1.ObjectMeta + obj metav1.Object + result metav1.Object + }{ + { + name: "add policy claim metadata", + policyID: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + policyMeta: metav1.ObjectMeta{Name: "pp-example", Namespace: "test"}, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{}, + }, + }, + }, + result: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.PropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + "annotations": map[string]interface{}{policyv1alpha1.PropagationPolicyNamespaceAnnotation: "test", policyv1alpha1.PropagationPolicyNameAnnotation: "pp-example"}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + AddPPClaimMetadata(tt.obj, tt.policyID, tt.policyMeta) + assert.Equal(t, tt.obj, tt.result) + }) + } +} + +func TestAddCPPClaimMetadata(t *testing.T) { + tests := []struct { + name string + policyID string + policyMeta metav1.ObjectMeta + obj metav1.Object + result metav1.Object + }{ + { + name: "add cluster policy claim metadata", + policyID: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + policyMeta: metav1.ObjectMeta{Name: "cpp-example"}, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{}, + }, + }, + }, + result: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + "annotations": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyAnnotation: "cpp-example"}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + AddCPPClaimMetadata(tt.obj, tt.policyID, tt.policyMeta) + 
assert.Equal(t, tt.obj, tt.result) + }) + } +} + +func TestCleanupPPClaimMetadata(t *testing.T) { + tests := []struct { + name string + obj metav1.Object + result metav1.Object + }{ + { + name: "clean up policy claim metadata", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.PropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + "annotations": map[string]interface{}{policyv1alpha1.PropagationPolicyNamespaceAnnotation: "default", policyv1alpha1.PropagationPolicyNameAnnotation: "pp-example"}, + }, + }, + }, + result: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{}, + "annotations": map[string]interface{}{}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + CleanupPPClaimMetadata(tt.obj) + assert.Equal(t, tt.obj, tt.result) + }) + } +} + +func TestCleanupCPPClaimMetadata(t *testing.T) { + tests := []struct { + name string + obj metav1.Object + result metav1.Object + }{ + { + name: "clean up policy claim metadata", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + "annotations": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyAnnotation: "cpp-example"}, + }, + }, + }, + result: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{}, + "annotations": map[string]interface{}{}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + CleanupCPPClaimMetadata(tt.obj) + assert.Equal(t, tt.obj, tt.result) + }) + } +} + +func TestNeedCleanupClaimMetadata(t *testing.T) { + tests := []struct { + name string + obj metav1.Object + targetClaimMetadata map[string]string + needCleanup bool + }{ + { + name: "need cleanup", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + "annotations": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyAnnotation: "cpp-example"}, + }, + }, + }, + targetClaimMetadata: map[string]string{policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + needCleanup: true, + }, + { + name: "no need cleanup", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.PropagationPolicyPermanentIDLabel: "b0907cgb-f3f3-4a4b-b289-5691a4fef979"}, + "annotations": map[string]interface{}{policyv1alpha1.PropagationPolicyNamespaceAnnotation: "default", policyv1alpha1.PropagationPolicyNameAnnotation: "pp-example"}, + }, + }, + }, + targetClaimMetadata: map[string]string{policyv1alpha1.PropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + needCleanup: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.needCleanup, NeedCleanupClaimMetadata(tt.obj, tt.targetClaimMetadata)) + }) + } +} diff --git a/pkg/detector/detector.go b/pkg/detector/detector.go index eaf682901bfa..3564a560e86c 100644 --- 
a/pkg/detector/detector.go +++ b/pkg/detector/detector.go @@ -62,22 +62,6 @@ import ( "github.com/karmada-io/karmada/pkg/util/restmapper" ) -var ( - propagationPolicyMarkedLabels = []string{ - policyv1alpha1.PropagationPolicyPermanentIDLabel, - } - propagationPolicyMarkedAnnotations = []string{ - policyv1alpha1.PropagationPolicyNamespaceAnnotation, - policyv1alpha1.PropagationPolicyNameAnnotation, - } - clusterPropagationPolicyMarkedLabels = []string{ - policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel, - } - clusterPropagationPolicyMarkedAnnotations = []string{ - policyv1alpha1.ClusterPropagationPolicyAnnotation, - } -) - // ResourceDetector is a resource watcher which watches all resources and reconcile the events. type ResourceDetector struct { // DiscoveryClientSet is used to resource discovery. @@ -464,15 +448,7 @@ func (d *ResourceDetector) ApplyPolicy(object *unstructured.Unstructured, object return nil } - policyLabels := map[string]string{ - policyv1alpha1.PropagationPolicyPermanentIDLabel: policyID, - } - policyAnnotations := map[string]string{ - policyv1alpha1.PropagationPolicyNamespaceAnnotation: policy.GetNamespace(), - policyv1alpha1.PropagationPolicyNameAnnotation: policy.GetName(), - } - - binding, err := d.BuildResourceBinding(object, policyLabels, policyAnnotations, &policy.Spec) + binding, err := d.BuildResourceBinding(object, &policy.Spec, policyID, policy.ObjectMeta, AddPPClaimMetadata) if err != nil { klog.Errorf("Failed to build resourceBinding for object: %s. error: %v", objectKey, err) return err @@ -501,7 +477,7 @@ func (d *ResourceDetector) ApplyPolicy(object *unstructured.Unstructured, object bindingCopy.Spec.Failover = binding.Spec.Failover bindingCopy.Spec.ConflictResolution = binding.Spec.ConflictResolution bindingCopy.Spec.Suspension = binding.Spec.Suspension - excludeClusterPolicy(bindingCopy.Labels) + excludeClusterPolicy(bindingCopy) return nil }) if err != nil { @@ -556,18 +532,11 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured, return nil } - policyLabels := map[string]string{ - policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: policyID, - } - policyAnnotations := map[string]string{ - policyv1alpha1.ClusterPropagationPolicyAnnotation: policy.GetName(), - } - // Build `ResourceBinding` or `ClusterResourceBinding` according to the resource template's scope. // For namespace-scoped resources, which namespace is not empty, building `ResourceBinding`. // For cluster-scoped resources, which namespace is empty, building `ClusterResourceBinding`. if object.GetNamespace() != "" { - binding, err := d.BuildResourceBinding(object, policyLabels, policyAnnotations, &policy.Spec) + binding, err := d.BuildResourceBinding(object, &policy.Spec, policyID, policy.ObjectMeta, AddCPPClaimMetadata) if err != nil { klog.Errorf("Failed to build resourceBinding for object: %s. error: %v", objectKey, err) return err @@ -614,7 +583,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured, klog.V(2).Infof("ResourceBinding(%s) is up to date.", binding.GetName()) } } else { - binding, err := d.BuildClusterResourceBinding(object, policyLabels, policyAnnotations, &policy.Spec) + binding, err := d.BuildClusterResourceBinding(object, &policy.Spec, policyID, policy.ObjectMeta) if err != nil { klog.Errorf("Failed to build clusterResourceBinding for object: %s. 
error: %v", objectKey, err) return err @@ -705,28 +674,16 @@ func (d *ResourceDetector) ClaimPolicyForObject(object *unstructured.Unstructure policyID := policy.Labels[policyv1alpha1.PropagationPolicyPermanentIDLabel] objLabels := object.GetLabels() - if objLabels == nil { - objLabels = make(map[string]string) - } else if len(objLabels) > 0 { + if len(objLabels) > 0 { // object has been claimed, don't need to claim again - if !excludeClusterPolicy(objLabels) && + if !excludeClusterPolicy(object) && objLabels[policyv1alpha1.PropagationPolicyPermanentIDLabel] == policyID { return policyID, nil } } - objLabels[policyv1alpha1.PropagationPolicyPermanentIDLabel] = policyID - - objectAnnotations := object.GetAnnotations() - if objectAnnotations == nil { - objectAnnotations = make(map[string]string) - } - objectAnnotations[policyv1alpha1.PropagationPolicyNamespaceAnnotation] = policy.Namespace - objectAnnotations[policyv1alpha1.PropagationPolicyNameAnnotation] = policy.Name - objectCopy := object.DeepCopy() - objectCopy.SetLabels(objLabels) - objectCopy.SetAnnotations(objectAnnotations) + AddPPClaimMetadata(objectCopy, policyID, policy.ObjectMeta) return policyID, d.Client.Update(context.TODO(), objectCopy) } @@ -741,15 +698,13 @@ func (d *ResourceDetector) ClaimClusterPolicyForObject(object *unstructured.Unst } objectCopy := object.DeepCopy() - util.MergeLabel(objectCopy, policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel, policyID) + AddCPPClaimMetadata(objectCopy, policyID, policy.ObjectMeta) - util.MergeAnnotation(objectCopy, policyv1alpha1.ClusterPropagationPolicyAnnotation, policy.Name) return policyID, d.Client.Update(context.TODO(), objectCopy) } // BuildResourceBinding builds a desired ResourceBinding for object. -func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructured, - labels, annotations map[string]string, policySpec *policyv1alpha1.PropagationSpec) (*workv1alpha2.ResourceBinding, error) { +func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructured, policySpec *policyv1alpha1.PropagationSpec, policyID string, policyMeta metav1.ObjectMeta, claimFunc func(object metav1.Object, policyId string, objectMeta metav1.ObjectMeta)) (*workv1alpha2.ResourceBinding, error) { bindingName := names.GenerateBindingName(object.GetKind(), object.GetName()) propagationBinding := &workv1alpha2.ResourceBinding{ ObjectMeta: metav1.ObjectMeta{ @@ -758,9 +713,7 @@ func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructure OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(object, object.GroupVersionKind()), }, - Annotations: annotations, - Labels: labels, - Finalizers: []string{util.BindingControllerFinalizer}, + Finalizers: []string{util.BindingControllerFinalizer}, }, Spec: workv1alpha2.ResourceBindingSpec{ PropagateDeps: policySpec.PropagateDeps, @@ -779,6 +732,7 @@ func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructure }, }, } + claimFunc(propagationBinding, policyID, policyMeta) if d.ResourceInterpreter.HookEnabled(object.GroupVersionKind(), configv1alpha1.InterpreterOperationInterpretReplica) { replicas, replicaRequirements, err := d.ResourceInterpreter.GetReplicas(object) @@ -795,7 +749,7 @@ func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructure // BuildClusterResourceBinding builds a desired ClusterResourceBinding for object. 
func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unstructured, - labels, annotations map[string]string, policySpec *policyv1alpha1.PropagationSpec) (*workv1alpha2.ClusterResourceBinding, error) { + policySpec *policyv1alpha1.PropagationSpec, policyID string, policyMeta metav1.ObjectMeta) (*workv1alpha2.ClusterResourceBinding, error) { bindingName := names.GenerateBindingName(object.GetKind(), object.GetName()) binding := &workv1alpha2.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ @@ -803,9 +757,7 @@ func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unst OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(object, object.GroupVersionKind()), }, - Annotations: annotations, - Labels: labels, - Finalizers: []string{util.ClusterResourceBindingControllerFinalizer}, + Finalizers: []string{util.ClusterResourceBindingControllerFinalizer}, }, Spec: workv1alpha2.ResourceBindingSpec{ PropagateDeps: policySpec.PropagateDeps, @@ -824,6 +776,8 @@ func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unst }, } + AddCPPClaimMetadata(binding, policyID, policyMeta) + if d.ResourceInterpreter.HookEnabled(object.GroupVersionKind(), configv1alpha1.InterpreterOperationInterpretReplica) { replicas, replicaRequirements, err := d.ResourceInterpreter.GetReplicas(object) if err != nil { @@ -1092,37 +1046,34 @@ func (d *ResourceDetector) ReconcileClusterPropagationPolicy(key util.QueueKey) } // HandlePropagationPolicyDeletion handles PropagationPolicy delete event. -// After a policy is removed, the label and annotations marked on relevant resource template will be removed (which gives +// After a policy is removed, the label and annotations claimed on relevant resource template will be removed (which gives // the resource template a change to match another policy). // // Note: The relevant ResourceBinding will continue to exist until the resource template is gone. func (d *ResourceDetector) HandlePropagationPolicyDeletion(policyID string) error { - rbs, err := helper.GetResourceBindings(d.Client, labels.Set{policyv1alpha1.PropagationPolicyPermanentIDLabel: policyID}) + claimMetadata := labels.Set{policyv1alpha1.PropagationPolicyPermanentIDLabel: policyID} + rbs, err := helper.GetResourceBindings(d.Client, claimMetadata) if err != nil { klog.Errorf("Failed to list propagation bindings with policy permanentID(%s): %v", policyID, err) return err } - cleanupMarksFunc := func(obj metav1.Object) { - util.RemoveLabels(obj, propagationPolicyMarkedLabels...) - util.RemoveAnnotations(obj, propagationPolicyMarkedAnnotations...) - } var errs []error for index, binding := range rbs.Items { - // Must remove the marks, such as labels and annotations, from the resource template ahead of ResourceBinding, - // otherwise might lose the chance to do that in a retry loop (in particular, the marks was successfully removed + // Must remove the claim metadata, such as labels and annotations, from the resource template ahead of ResourceBinding, + // otherwise might lose the chance to do that in a retry loop (in particular, the claim metadata was successfully removed // from ResourceBinding, but resource template not), since the ResourceBinding will not be listed again. 
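The ordering constraint described in the comment above matters because the deletion handler finds ResourceBindings by the policy's permanent-ID label: once that label is stripped from a binding, a later retry can no longer list it, so a resource template whose cleanup failed would keep stale claim metadata. A small sketch of that listing behavior, using a placeholder label key instead of the real policyv1alpha1 constant:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Placeholder for policyv1alpha1.PropagationPolicyPermanentIDLabel.
	const idLabel = "example.io/propagationpolicy-permanent-id"
	claim := labels.Set{idLabel: "example-permanent-id"}
	selector := labels.SelectorFromSet(claim)

	rbLabels := labels.Set{idLabel: "example-permanent-id"}
	fmt.Println(selector.Matches(rbLabels)) // true: the binding is still listed on the next retry

	// If the claim were removed from the ResourceBinding first and the resource
	// template update then failed, the next reconcile could no longer find the binding:
	delete(rbLabels, idLabel)
	fmt.Println(selector.Matches(rbLabels)) // false: the template would keep its stale claim metadata
}
```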
- if err := d.CleanupResourceTemplateMarks(binding.Spec.Resource, cleanupMarksFunc); err != nil { - klog.Errorf("Failed to clean up marks from resource(%s-%s/%s) when propagationPolicy removed, error: %v", + if err := d.CleanupResourceTemplateClaimMetadata(binding.Spec.Resource, claimMetadata, CleanupPPClaimMetadata); err != nil { + klog.Errorf("Failed to clean up claim metadata from resource(%s-%s/%s) when propagationPolicy removed, error: %v", binding.Spec.Resource.Kind, binding.Spec.Resource.Namespace, binding.Spec.Resource.Name, err) errs = append(errs, err) // Skip cleaning up policy labels and annotations from ResourceBinding, give a chance to do that in a retry loop. continue } - // Clean up the marks from the reference binding so that the karmada scheduler won't reschedule the binding. - if err := d.CleanupResourceBindingMarks(&rbs.Items[index], cleanupMarksFunc); err != nil { - klog.Errorf("Failed to clean up marks from resource binding(%s/%s) when propagationPolicy removed, error: %v", + // Clean up the claim metadata from the reference binding so that the karmada scheduler won't reschedule the binding. + if err := d.CleanupResourceBindingClaimMetadata(&rbs.Items[index], claimMetadata, CleanupPPClaimMetadata); err != nil { + klog.Errorf("Failed to clean up claim metadata from resource binding(%s/%s) when propagationPolicy removed, error: %v", binding.Namespace, binding.Name, err) errs = append(errs, err) } @@ -1131,7 +1082,7 @@ func (d *ResourceDetector) HandlePropagationPolicyDeletion(policyID string) erro } // HandleClusterPropagationPolicyDeletion handles ClusterPropagationPolicy delete event. -// After a policy is removed, the label and annotation marked on relevant resource template will be removed (which gives +// After a policy is removed, the label and annotation claimed on relevant resource template will be removed (which gives // the resource template a change to match another policy). // // Note: The relevant ClusterResourceBinding or ResourceBinding will continue to exist until the resource template is gone. @@ -1141,11 +1092,6 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyDeletion(policyID strin policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: policyID, } - cleanupMarksFun := func(obj metav1.Object) { - util.RemoveLabels(obj, clusterPropagationPolicyMarkedLabels...) - util.RemoveAnnotations(obj, clusterPropagationPolicyMarkedAnnotations...) - } - // load the ClusterResourceBindings which labeled with current policy crbs, err := helper.GetClusterResourceBindings(d.Client, labelSet) if err != nil { @@ -1153,20 +1099,20 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyDeletion(policyID strin errs = append(errs, err) } else if len(crbs.Items) > 0 { for index, binding := range crbs.Items { - // Must remove the marks, such as labels and annotations, from the resource template ahead of + // Must remove the claim metadata, such as labels and annotations, from the resource template ahead of // ClusterResourceBinding, otherwise might lose the chance to do that in a retry loop (in particular, the - // marks was successfully removed from ClusterResourceBinding, but resource template not), since the + // claim metadata was successfully removed from ClusterResourceBinding, but resource template not), since the // ClusterResourceBinding will not be listed again. 
- if err := d.CleanupResourceTemplateMarks(binding.Spec.Resource, cleanupMarksFun); err != nil { - klog.Errorf("Failed to clean up marks from resource(%s-%s) when clusterPropagationPolicy removed, error: %v", + if err := d.CleanupResourceTemplateClaimMetadata(binding.Spec.Resource, labelSet, CleanupCPPClaimMetadata); err != nil { + klog.Errorf("Failed to clean up claim metadata from resource(%s-%s) when clusterPropagationPolicy removed, error: %v", binding.Spec.Resource.Kind, binding.Spec.Resource.Name, err) // Skip cleaning up policy labels and annotations from ClusterResourceBinding, give a chance to do that in a retry loop. continue } - // Clean up the marks from the reference binding so that the Karmada scheduler won't reschedule the binding. - if err := d.CleanupClusterResourceBindingMarks(&crbs.Items[index], cleanupMarksFun); err != nil { - klog.Errorf("Failed to clean up marks from clusterResourceBinding(%s) when clusterPropagationPolicy removed, error: %v", + // Clean up the claim metadata from the reference binding so that the Karmada scheduler won't reschedule the binding. + if err := d.CleanupClusterResourceBindingClaimMetadata(&crbs.Items[index], labelSet); err != nil { + klog.Errorf("Failed to clean up claim metadata from clusterResourceBinding(%s) when clusterPropagationPolicy removed, error: %v", binding.Name, err) errs = append(errs, err) } @@ -1180,20 +1126,20 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyDeletion(policyID strin errs = append(errs, err) } else if len(rbs.Items) > 0 { for index, binding := range rbs.Items { - // Must remove the marks, such as labels and annotations, from the resource template ahead of ResourceBinding, + // Must remove the claim metadata, such as labels and annotations, from the resource template ahead of ResourceBinding, // otherwise might lose the chance to do that in a retry loop (in particular, the label was successfully // removed from ResourceBinding, but resource template not), since the ResourceBinding will not be listed again. - if err := d.CleanupResourceTemplateMarks(binding.Spec.Resource, cleanupMarksFun); err != nil { - klog.Errorf("Failed to clean up marks from resource(%s-%s/%s) when clusterPropagationPolicy removed, error: %v", + if err := d.CleanupResourceTemplateClaimMetadata(binding.Spec.Resource, labelSet, CleanupCPPClaimMetadata); err != nil { + klog.Errorf("Failed to clean up claim metadata from resource(%s-%s/%s) when clusterPropagationPolicy removed, error: %v", binding.Spec.Resource.Kind, binding.Spec.Resource.Namespace, binding.Spec.Resource.Name, err) errs = append(errs, err) // Skip cleaning up policy labels and annotations from ResourceBinding, give a chance to do that in a retry loop. continue } - // Clean up the marks from the reference binding so that the Karmada scheduler won't reschedule the binding. - if err := d.CleanupResourceBindingMarks(&rbs.Items[index], cleanupMarksFun); err != nil { - klog.Errorf("Failed to clean up marks from resourceBinding(%s/%s) when clusterPropagationPolicy removed, error: %v", + // Clean up the claim metadata from the reference binding so that the Karmada scheduler won't reschedule the binding. 
+ if err := d.CleanupResourceBindingClaimMetadata(&rbs.Items[index], labelSet, CleanupCPPClaimMetadata); err != nil { + klog.Errorf("Failed to clean up claim metadata from resourceBinding(%s/%s) when clusterPropagationPolicy removed, error: %v", binding.Namespace, binding.Name, err) errs = append(errs, err) } @@ -1209,7 +1155,7 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyDeletion(policyID strin // from waiting list and throw the object to it's reconcile queue. If not, do nothing. // Finally, handle the propagation policy preemption process if preemption is enabled. func (d *ResourceDetector) HandlePropagationPolicyCreationOrUpdate(policy *policyv1alpha1.PropagationPolicy) error { - // If the Policy's ResourceSelectors change, causing certain resources to no longer match the Policy, the label marked + // If the Policy's ResourceSelectors change, causing certain resources to no longer match the Policy, the label claimed // on relevant resource template will be removed (which gives the resource template a change to match another policy). policyID := policy.Labels[policyv1alpha1.PropagationPolicyPermanentIDLabel] err := d.cleanPPUnmatchedRBs(policyID, policy.Namespace, policy.Name, policy.Spec.ResourceSelectors) @@ -1251,7 +1197,7 @@ func (d *ResourceDetector) HandlePropagationPolicyCreationOrUpdate(policy *polic } // If preemption is enabled, handle the preemption process. - // If this policy succeeds in preempting resource managed by other policy, the label marked on relevant resource + // If this policy succeeds in preempting resource managed by other policy, the label claimed on relevant resource // will be replaced, which gives the resource template a change to match to this policy. if preemptionEnabled(policy.Spec.Preemption) { return d.handlePropagationPolicyPreemption(policy) @@ -1267,7 +1213,7 @@ func (d *ResourceDetector) HandlePropagationPolicyCreationOrUpdate(policy *polic // from waiting list and throw the object to it's reconcile queue. If not, do nothing. // Finally, handle the cluster propagation policy preemption process if preemption is enabled. func (d *ResourceDetector) HandleClusterPropagationPolicyCreationOrUpdate(policy *policyv1alpha1.ClusterPropagationPolicy) error { - // If the Policy's ResourceSelectors change, causing certain resources to no longer match the Policy, the label marked + // If the Policy's ResourceSelectors change, causing certain resources to no longer match the Policy, the label claimed // on relevant resource template will be removed (which gives the resource template a change to match another policy). policyID := policy.Labels[policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel] err := d.cleanCPPUnmatchedRBs(policyID, policy.Name, policy.Spec.ResourceSelectors) @@ -1324,7 +1270,7 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyCreationOrUpdate(policy } // If preemption is enabled, handle the preemption process. - // If this policy succeeds in preempting resource managed by other policy, the label marked on relevant resource + // If this policy succeeds in preempting resource managed by other policy, the label claimed on relevant resource // will be replaced, which gives the resource template a change to match to this policy. 
if preemptionEnabled(policy.Spec.Preemption) { return d.handleClusterPropagationPolicyPreemption(policy) @@ -1333,8 +1279,8 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyCreationOrUpdate(policy return nil } -// CleanupResourceTemplateMarks removes marks, such as labels and annotations, from object referencing by objRef. -func (d *ResourceDetector) CleanupResourceTemplateMarks(objRef workv1alpha2.ObjectReference, cleanupFunc func(obj metav1.Object)) error { +// CleanupResourceTemplateClaimMetadata removes claim metadata, such as labels and annotations, from object referencing by objRef. +func (d *ResourceDetector) CleanupResourceTemplateClaimMetadata(objRef workv1alpha2.ObjectReference, targetClaimMetadata map[string]string, cleanupFunc func(obj metav1.Object)) error { gvr, err := restmapper.GetGroupVersionResource(d.RESTMapper, schema.FromAPIVersionAndKind(objRef.APIVersion, objRef.Kind)) if err != nil { klog.Errorf("Failed to convert GVR from GVK(%s/%s), err: %v", objRef.APIVersion, objRef.Kind, err) @@ -1352,6 +1298,11 @@ func (d *ResourceDetector) CleanupResourceTemplateMarks(objRef workv1alpha2.Obje return err } + if !NeedCleanupClaimMetadata(workload, targetClaimMetadata) { + klog.Infof("No need to clean up the claim metadata on resource(kind=%s, %s/%s) since they have changed", workload.GetKind(), workload.GetNamespace(), workload.GetName()) + return nil + } + cleanupFunc(workload) _, err = d.DynamicClient.Resource(gvr).Namespace(workload.GetNamespace()).Update(context.TODO(), workload, metav1.UpdateOptions{}) @@ -1364,9 +1315,13 @@ func (d *ResourceDetector) CleanupResourceTemplateMarks(objRef workv1alpha2.Obje }) } -// CleanupResourceBindingMarks removes marks, such as labels and annotations, from resource binding. -func (d *ResourceDetector) CleanupResourceBindingMarks(rb *workv1alpha2.ResourceBinding, cleanupFunc func(obj metav1.Object)) error { +// CleanupResourceBindingClaimMetadata removes claim metadata, such as labels and annotations, from resource binding. +func (d *ResourceDetector) CleanupResourceBindingClaimMetadata(rb *workv1alpha2.ResourceBinding, targetClaimMetadata map[string]string, cleanupFunc func(obj metav1.Object)) error { return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) { + if !NeedCleanupClaimMetadata(rb, targetClaimMetadata) { + klog.Infof("No need to clean up the claim metadata on ResourceBinding(%s/%s) since they have changed", rb.GetNamespace(), rb.GetName()) + return nil + } cleanupFunc(rb) updateErr := d.Client.Update(context.TODO(), rb) if updateErr == nil { @@ -1377,16 +1332,20 @@ func (d *ResourceDetector) CleanupResourceBindingMarks(rb *workv1alpha2.Resource if err = d.Client.Get(context.TODO(), client.ObjectKey{Namespace: rb.GetNamespace(), Name: rb.GetName()}, updated); err == nil { rb = updated.DeepCopy() } else { - klog.Errorf("Failed to get updated resource binding %s/%s: %v", rb.GetNamespace(), rb.GetName(), err) + klog.Errorf("Failed to get updated ResourceBinding(%s/%s): %v", rb.GetNamespace(), rb.GetName(), err) } return updateErr }) } -// CleanupClusterResourceBindingMarks removes marks, such as labels and annotations, from cluster resource binding. -func (d *ResourceDetector) CleanupClusterResourceBindingMarks(crb *workv1alpha2.ClusterResourceBinding, cleanupFunc func(obj metav1.Object)) error { +// CleanupClusterResourceBindingClaimMetadata removes claim metadata, such as labels and annotations, from cluster resource binding. 
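NeedCleanupClaimMetadata guards each of these cleanup paths: claim metadata is removed only when the permanent-ID label on the object still matches the policy being deleted, so the fresh claim stamped by a rapidly recreated policy is left untouched. A minimal, self-contained sketch of that guard, with a placeholder label key and a simplified helper name:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// needCleanup mirrors NeedCleanupClaimMetadata: cleanup proceeds only if every
// key/value in targetClaimMetadata still matches the object's labels, i.e. the
// claim still belongs to the policy instance being deleted.
func needCleanup(obj metav1.Object, targetClaimMetadata map[string]string) bool {
	for k, v := range targetClaimMetadata {
		if obj.GetLabels()[k] != v {
			return false
		}
	}
	return true
}

func main() {
	// Placeholder for policyv1alpha1.PropagationPolicyPermanentIDLabel.
	const idLabel = "example.io/propagationpolicy-permanent-id"

	workload := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "apps/v1",
		"kind":       "Deployment",
		"metadata": map[string]interface{}{
			"name":   "nginx",
			"labels": map[string]interface{}{idLabel: "new-policy-id"},
		},
	}}

	// Deleting the old policy instance must not strip the claim stamped by the
	// recreated policy: the stale permanent ID no longer matches, so cleanup is skipped.
	fmt.Println(needCleanup(workload, map[string]string{idLabel: "old-policy-id"})) // false
	fmt.Println(needCleanup(workload, map[string]string{idLabel: "new-policy-id"})) // true
}
```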
+func (d *ResourceDetector) CleanupClusterResourceBindingClaimMetadata(crb *workv1alpha2.ClusterResourceBinding, targetClaimMetadata map[string]string) error { return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) { - cleanupFunc(crb) + if !NeedCleanupClaimMetadata(crb, targetClaimMetadata) { + klog.Infof("No need to clean up the claim metadata on ClusterResourceBinding(%s) since they have changed", crb.GetName()) + return nil + } + CleanupCPPClaimMetadata(crb) updateErr := d.Client.Update(context.TODO(), crb) if updateErr == nil { return nil @@ -1396,7 +1355,7 @@ func (d *ResourceDetector) CleanupClusterResourceBindingMarks(crb *workv1alpha2. if err = d.Client.Get(context.TODO(), client.ObjectKey{Name: crb.GetName()}, updated); err == nil { crb = updated.DeepCopy() } else { - klog.Errorf("Failed to get updated cluster resource binding %s: %v", crb.GetName(), err) + klog.Errorf("Failed to get updated ClusterResourceBinding(%s):: %v", crb.GetName(), err) } return updateErr }) diff --git a/pkg/detector/policy.go b/pkg/detector/policy.go index 47b1077e62ec..1a1b7c69c3fe 100644 --- a/pkg/detector/policy.go +++ b/pkg/detector/policy.go @@ -22,6 +22,7 @@ import ( "time" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/errors" @@ -187,7 +188,7 @@ func (d *ResourceDetector) cleanPPUnmatchedRBs(policyID, policyNamespace, policy return err } - return d.removeRBsMarks(bindings, selectors, propagationPolicyMarkedLabels, propagationPolicyMarkedAnnotations) + return d.removeRBsClaimMetadata(bindings, selectors, propagationPolicyClaimLabels, propagationPolicyClaimAnnotations) } func (d *ResourceDetector) cleanCPPUnmatchedRBs(policyID, policyName string, selectors []policyv1alpha1.ResourceSelector) error { @@ -196,7 +197,7 @@ func (d *ResourceDetector) cleanCPPUnmatchedRBs(policyID, policyName string, sel return err } - return d.removeRBsMarks(bindings, selectors, clusterPropagationPolicyMarkedLabels, clusterPropagationPolicyMarkedAnnotations) + return d.removeRBsClaimMetadata(bindings, selectors, clusterPropagationPolicyClaimLabels, clusterPropagationPolicyClaimAnnotations) } func (d *ResourceDetector) cleanUnmatchedCRBs(policyID, policyName string, selectors []policyv1alpha1.ResourceSelector) error { @@ -205,13 +206,13 @@ func (d *ResourceDetector) cleanUnmatchedCRBs(policyID, policyName string, selec return err } - return d.removeCRBsMarks(bindings, selectors, clusterPropagationPolicyMarkedLabels, clusterPropagationPolicyMarkedAnnotations) + return d.removeCRBsClaimMetadata(bindings, selectors, clusterPropagationPolicyClaimLabels, clusterPropagationPolicyClaimAnnotations) } -func (d *ResourceDetector) removeRBsMarks(bindings *workv1alpha2.ResourceBindingList, selectors []policyv1alpha1.ResourceSelector, labels, annotations []string) error { +func (d *ResourceDetector) removeRBsClaimMetadata(bindings *workv1alpha2.ResourceBindingList, selectors []policyv1alpha1.ResourceSelector, labels, annotations []string) error { var errs []error for _, binding := range bindings.Items { - removed, err := d.removeResourceMarksIfNotMatched(binding.Spec.Resource, selectors, labels, annotations) + removed, err := d.removeResourceClaimMetadataIfNotMatched(binding.Spec.Resource, selectors, labels, annotations) if err != nil { klog.Errorf("Failed to remove resource labels and annotations when resource not match with policy selectors, err: %v", err) 
errs = append(errs, err) @@ -234,11 +235,11 @@ func (d *ResourceDetector) removeRBsMarks(bindings *workv1alpha2.ResourceBinding return errors.NewAggregate(errs) } -func (d *ResourceDetector) removeCRBsMarks(bindings *workv1alpha2.ClusterResourceBindingList, +func (d *ResourceDetector) removeCRBsClaimMetadata(bindings *workv1alpha2.ClusterResourceBindingList, selectors []policyv1alpha1.ResourceSelector, removeLabels, removeAnnotations []string) error { var errs []error for _, binding := range bindings.Items { - removed, err := d.removeResourceMarksIfNotMatched(binding.Spec.Resource, selectors, removeLabels, removeAnnotations) + removed, err := d.removeResourceClaimMetadataIfNotMatched(binding.Spec.Resource, selectors, removeLabels, removeAnnotations) if err != nil { klog.Errorf("Failed to remove resource labels and annotations when resource not match with policy selectors, err: %v", err) errs = append(errs, err) @@ -261,7 +262,7 @@ func (d *ResourceDetector) removeCRBsMarks(bindings *workv1alpha2.ClusterResourc return errors.NewAggregate(errs) } -func (d *ResourceDetector) removeResourceMarksIfNotMatched(objectReference workv1alpha2.ObjectReference, +func (d *ResourceDetector) removeResourceClaimMetadataIfNotMatched(objectReference workv1alpha2.ObjectReference, selectors []policyv1alpha1.ResourceSelector, labels, annotations []string) (bool, error) { objectKey, err := helper.ConstructClusterWideKey(objectReference) if err != nil { @@ -340,10 +341,10 @@ func (d *ResourceDetector) listCPPDerivedCRBs(policyID, policyName string) (*wor // excludeClusterPolicy excludes cluster propagation policy. // If propagation policy was claimed, cluster propagation policy should not exist. -func excludeClusterPolicy(objLabels map[string]string) bool { - if _, ok := objLabels[policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel]; !ok { +func excludeClusterPolicy(obj metav1.Object) (hasClaimedClusterPolicy bool) { + if _, ok := obj.GetLabels()[policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel]; !ok { return false } - delete(objLabels, policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel) + CleanupCPPClaimMetadata(obj) return true } diff --git a/pkg/detector/policy_test.go b/pkg/detector/policy_test.go index df5722cf48c8..90cc8bc9b9cb 100644 --- a/pkg/detector/policy_test.go +++ b/pkg/detector/policy_test.go @@ -447,7 +447,7 @@ func Test_cleanUnmatchedCRBs(t *testing.T) { } } -func Test_removeRBsMarks(t *testing.T) { +func Test_removeRBsClaimMetadata(t *testing.T) { scheme := runtime.NewScheme() utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(appsv1.AddToScheme(scheme)) @@ -664,15 +664,15 @@ func Test_removeRBsMarks(t *testing.T) { RESTMapper: fakeClient.RESTMapper(), InformerManager: genMgr, } - err := resourceDetector.removeRBsMarks(tt.bindings, tt.selectors, tt.removeLabels, tt.removeAnnotations) + err := resourceDetector.removeRBsClaimMetadata(tt.bindings, tt.selectors, tt.removeLabels, tt.removeAnnotations) if (err != nil) != tt.wantErr { - t.Errorf("removeRBsMarks() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("removeRBsClaimMetadata() error = %v, wantErr %v", err, tt.wantErr) } }) } } -func Test_removeCRBsMarks(t *testing.T) { +func Test_removeCRBsClaimMetadata(t *testing.T) { scheme := runtime.NewScheme() utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(appsv1.AddToScheme(scheme)) @@ -889,15 +889,15 @@ func Test_removeCRBsMarks(t *testing.T) { RESTMapper: fakeClient.RESTMapper(), InformerManager: genMgr, } - err := 
resourceDetector.removeCRBsMarks(tt.bindings, tt.selectors, tt.removeLabels, tt.removeAnnotations) + err := resourceDetector.removeCRBsClaimMetadata(tt.bindings, tt.selectors, tt.removeLabels, tt.removeAnnotations) if (err != nil) != tt.wantErr { - t.Errorf("removeCRBsMarks() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("removeCRBsClaimMetadata() error = %v, wantErr %v", err, tt.wantErr) } }) } } -func Test_removeResourceMarksIfNotMatched(t *testing.T) { +func Test_removeResourceClaimMetadataIfNotMatched(t *testing.T) { scheme := runtime.NewScheme() utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(appsv1.AddToScheme(scheme)) @@ -1113,13 +1113,13 @@ func Test_removeResourceMarksIfNotMatched(t *testing.T) { InformerManager: genMgr, } - updated, err := resourceDetector.removeResourceMarksIfNotMatched(tt.objectReference, tt.selectors, tt.labels, tt.annotations) + updated, err := resourceDetector.removeResourceClaimMetadataIfNotMatched(tt.objectReference, tt.selectors, tt.labels, tt.annotations) if (err != nil) != tt.wantErr { - t.Errorf("removeResourceMarksIfNotMatched() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("removeResourceClaimMetadataIfNotMatched() error = %v, wantErr %v", err, tt.wantErr) } if updated != tt.wantUpdated { - t.Errorf("removeResourceMarksIfNotMatched() = %v, want %v", updated, tt.wantUpdated) + t.Errorf("removeResourceClaimMetadataIfNotMatched() = %v, want %v", updated, tt.wantUpdated) } }) } @@ -1363,26 +1363,54 @@ func Test_listCPPDerivedCRBs(t *testing.T) { func Test_excludeClusterPolicy(t *testing.T) { tests := []struct { - name string - objLabels map[string]string - want bool + name string + obj metav1.Object + result metav1.Object + hasClaimedClusterPolicy bool }{ { - name: "propagation policy was claimed", - objLabels: map[string]string{}, - want: false, + name: "propagation policy was claimed", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{}, + }, + }, + }, + result: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{}, + }, + }, + }, + hasClaimedClusterPolicy: false, }, { name: "propagation policy was not claimed", - objLabels: map[string]string{ - policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", "foo": "bar"}, + "annotations": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyAnnotation: "nginx", "foo1": "bar1"}, + }, + }, + }, + result: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{"foo": "bar"}, + "annotations": map[string]interface{}{"foo1": "bar1"}, + }, + }, }, - want: true, + hasClaimedClusterPolicy: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := excludeClusterPolicy(tt.objLabels) - assert.Equal(t, tt.want, got) + got := excludeClusterPolicy(tt.obj) + assert.Equal(t, tt.obj, tt.result) + assert.Equal(t, tt.hasClaimedClusterPolicy, got) }) } } diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 28551c07266e..31f52beca3e9 100644 --- 
a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -109,9 +109,11 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FederatedResourceQuotaList": schema_pkg_apis_policy_v1alpha1_FederatedResourceQuotaList(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FederatedResourceQuotaSpec": schema_pkg_apis_policy_v1alpha1_FederatedResourceQuotaSpec(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FederatedResourceQuotaStatus": schema_pkg_apis_policy_v1alpha1_FederatedResourceQuotaStatus(ref), + "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FieldOverrider": schema_pkg_apis_policy_v1alpha1_FieldOverrider(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FieldSelector": schema_pkg_apis_policy_v1alpha1_FieldSelector(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.ImageOverrider": schema_pkg_apis_policy_v1alpha1_ImageOverrider(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.ImagePredicate": schema_pkg_apis_policy_v1alpha1_ImagePredicate(ref), + "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.JSONPatchOperation": schema_pkg_apis_policy_v1alpha1_JSONPatchOperation(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.LabelAnnotationOverrider": schema_pkg_apis_policy_v1alpha1_LabelAnnotationOverrider(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.OverridePolicy": schema_pkg_apis_policy_v1alpha1_OverridePolicy(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.OverridePolicyList": schema_pkg_apis_policy_v1alpha1_OverridePolicyList(ref), @@ -130,6 +132,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.StaticClusterWeight": schema_pkg_apis_policy_v1alpha1_StaticClusterWeight(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.SuspendClusters": schema_pkg_apis_policy_v1alpha1_SuspendClusters(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Suspension": schema_pkg_apis_policy_v1alpha1_Suspension(ref), + "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.YAMLPatchOperation": schema_pkg_apis_policy_v1alpha1_YAMLPatchOperation(ref), "github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1.ClusterAffinity": schema_pkg_apis_remedy_v1alpha1_ClusterAffinity(ref), "github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1.ClusterConditionRequirement": schema_pkg_apis_remedy_v1alpha1_ClusterConditionRequirement(ref), "github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1.DecisionMatch": schema_pkg_apis_remedy_v1alpha1_DecisionMatch(ref), @@ -4192,6 +4195,58 @@ func schema_pkg_apis_policy_v1alpha1_FederatedResourceQuotaStatus(ref common.Ref } } +func schema_pkg_apis_policy_v1alpha1_FieldOverrider(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. This allows changing a single field within the resource with multiple operations. It is designed to handle structured field values such as those found in ConfigMaps or Secrets. The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. 
Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "fieldPath": { + SchemaProps: spec.SchemaProps{ + Description: "FieldPath specifies the initial location in the instance document where the operation should take place. The path uses RFC 6901 for navigating into nested structures. For example, the path \"/data/db-config.yaml\" specifies the configuration data key named \"db-config.yaml\" in a ConfigMap: \"/data/db-config.yaml\".", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "json": { + SchemaProps: spec.SchemaProps{ + Description: "JSON represents the operations performed on the JSON document specified by the FieldPath.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.JSONPatchOperation"), + }, + }, + }, + }, + }, + "yaml": { + SchemaProps: spec.SchemaProps{ + Description: "YAML represents the operations performed on the YAML document specified by the FieldPath.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.YAMLPatchOperation"), + }, + }, + }, + }, + }, + }, + Required: []string{"fieldPath"}, + }, + }, + Dependencies: []string{ + "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.JSONPatchOperation", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.YAMLPatchOperation"}, + } +} + func schema_pkg_apis_policy_v1alpha1_FieldSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -4288,6 +4343,44 @@ func schema_pkg_apis_policy_v1alpha1_ImagePredicate(ref common.ReferenceCallback } } +func schema_pkg_apis_policy_v1alpha1_JSONPatchOperation(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "JSONPatchOperation represents a single field modification operation for JSON format.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "SubPath specifies the relative location within the initial FieldPath where the operation should take place. The path uses RFC 6901 for navigating into nested structures.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "operator": { + SchemaProps: spec.SchemaProps{ + Description: "Operator indicates the operation on target field. Available operators are: \"add\", \"remove\", and \"replace\".", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the new value to set for the specified field if the operation is \"add\" or \"replace\". 
For \"remove\" operation, this field is ignored.", + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), + }, + }, + }, + Required: []string{"subPath", "operator"}, + }, + }, + Dependencies: []string{ + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"}, + } +} + func schema_pkg_apis_policy_v1alpha1_LabelAnnotationOverrider(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -4479,7 +4572,7 @@ func schema_pkg_apis_policy_v1alpha1_Overriders(ref common.ReferenceCallback) co return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Overriders offers various alternatives to represent the override rules.\n\nIf more than one alternative exists, they will be applied with following order: - ImageOverrider - CommandOverrider - ArgsOverrider - LabelsOverrider - AnnotationsOverrider - Plaintext", + Description: "Overriders offers various alternatives to represent the override rules.\n\nIf more than one alternative exists, they will be applied with following order: - ImageOverrider - CommandOverrider - ArgsOverrider - LabelsOverrider - AnnotationsOverrider - FieldOverrider - Plaintext", Type: []string{"object"}, Properties: map[string]spec.Schema{ "plaintext": { @@ -4566,11 +4659,25 @@ func schema_pkg_apis_policy_v1alpha1_Overriders(ref common.ReferenceCallback) co }, }, }, + "fieldOverrider": { + SchemaProps: spec.SchemaProps{ + Description: "FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. This allows changing a single field within the resource with multiple operations. It is designed to handle structured field values such as those found in ConfigMaps or Secrets. The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FieldOverrider"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.CommandArgsOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.ImageOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.LabelAnnotationOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.PlaintextOverrider"}, + "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.CommandArgsOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FieldOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.ImageOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.LabelAnnotationOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.PlaintextOverrider"}, } } @@ -4878,6 +4985,13 @@ func schema_pkg_apis_policy_v1alpha1_PropagationSpec(ref common.ReferenceCallbac Ref: ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Suspension"), }, }, + "preserveResourcesOnDeletion": { + SchemaProps: spec.SchemaProps{ + Description: "PreserveResourcesOnDeletion controls whether resources should be preserved on the member clusters when the resource template is deleted. If set to true, resources will be preserved on the member clusters. 
Default is false, which means resources will be deleted along with the resource template.\n\nThis setting is particularly useful during workload migration scenarios to ensure that rollback can occur quickly without affecting the workloads running on the member clusters.\n\nAdditionally, this setting applies uniformly across all member clusters and will not selectively control preservation on only some clusters.\n\nNote: This setting does not apply to the deletion of the policy itself. When the policy is deleted, the resource templates and their corresponding propagated resources in member clusters will remain unchanged unless explicitly deleted.", + Type: []string{"boolean"}, + Format: "", + }, + }, }, Required: []string{"resourceSelectors"}, }, @@ -5169,6 +5283,44 @@ func schema_pkg_apis_policy_v1alpha1_Suspension(ref common.ReferenceCallback) co } } +func schema_pkg_apis_policy_v1alpha1_YAMLPatchOperation(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "YAMLPatchOperation represents a single field modification operation for YAML format.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "SubPath specifies the relative location within the initial FieldPath where the operation should take place. The path uses RFC 6901 for navigating into nested structures.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "operator": { + SchemaProps: spec.SchemaProps{ + Description: "Operator indicates the operation on target field. Available operators are: \"add\", \"remove\", and \"replace\".", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the new value to set for the specified field if the operation is \"add\" or \"replace\". For \"remove\" operation, this field is ignored.", + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), + }, + }, + }, + Required: []string{"subPath", "operator"}, + }, + }, + Dependencies: []string{ + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"}, + } +} + func schema_pkg_apis_remedy_v1alpha1_ClusterAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -6378,7 +6530,14 @@ func schema_pkg_apis_work_v1alpha1_WorkSpec(ref common.ReferenceCallback) common }, "suspendDispatching": { SchemaProps: spec.SchemaProps{ - Description: "SuspendDispatching controls whether dispatching should be suspended, nil means not suspend. Note: true means stop propagating to all clusters.", + Description: "SuspendDispatching controls whether dispatching should be suspended, nil means not suspend. Note: true means stop propagating to the corresponding member cluster, and does not prevent status collection.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "preserveResourcesOnDeletion": { + SchemaProps: spec.SchemaProps{ + Description: "PreserveResourcesOnDeletion controls whether resources should be preserved on the member cluster when the Work object is deleted. If set to true, resources will be preserved on the member cluster. 
Default is false, which means resources will be deleted along with the Work object.", Type: []string{"boolean"}, Format: "", }, @@ -7105,6 +7264,13 @@ func schema_pkg_apis_work_v1alpha2_ResourceBindingSpec(ref common.ReferenceCallb Ref: ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Suspension"), }, }, + "preserveResourcesOnDeletion": { + SchemaProps: spec.SchemaProps{ + Description: "PreserveResourcesOnDeletion controls whether resources should be preserved on the member clusters when the binding object is deleted. If set to true, resources will be preserved on the member clusters. Default is false, which means resources will be deleted along with the binding object. This setting applies to all Work objects created under this binding object.", + Type: []string{"boolean"}, + Format: "", + }, + }, }, Required: []string{"resource"}, }, diff --git a/pkg/karmadactl/addons/descheduler/manifests.go b/pkg/karmadactl/addons/descheduler/manifests.go index ad28d37d4aa6..59e5755741ae 100644 --- a/pkg/karmadactl/addons/descheduler/manifests.go +++ b/pkg/karmadactl/addons/descheduler/manifests.go @@ -45,7 +45,7 @@ spec: command: - /bin/karmada-descheduler - --kubeconfig=/etc/kubeconfig - - --metrics-bind-address=0.0.0.0:10358 + - --metrics-bind-address=0.0.0.0:8080 - --health-probe-bind-address=0.0.0.0:10358 - --leader-elect-resource-namespace={{ .Namespace }} - --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt @@ -62,7 +62,7 @@ spec: periodSeconds: 15 timeoutSeconds: 5 ports: - - containerPort: 10358 + - containerPort: 8080 name: metrics protocol: TCP volumeMounts: diff --git a/pkg/karmadactl/cmdinit/karmada/deploy.go b/pkg/karmadactl/cmdinit/karmada/deploy.go index 6c8a751884e6..7af7791d3601 100644 --- a/pkg/karmadactl/cmdinit/karmada/deploy.go +++ b/pkg/karmadactl/cmdinit/karmada/deploy.go @@ -221,7 +221,7 @@ func crdPatchesResources(filename, caBundle string) ([]byte, error) { } // createCRDs create crd resource -func createCRDs(crdClient *clientset.Clientset, filename string) error { +func createCRDs(crdClient clientset.Interface, filename string) error { obj := apiextensionsv1.CustomResourceDefinition{} data, err := os.ReadFile(filename) if err != nil { @@ -252,7 +252,7 @@ func createCRDs(crdClient *clientset.Clientset, filename string) error { } // patchCRDs patch crd resource -func patchCRDs(crdClient *clientset.Clientset, caBundle, filename string) error { +func patchCRDs(crdClient clientset.Interface, caBundle, filename string) error { data, err := crdPatchesResources(filename, caBundle) if err != nil { return err diff --git a/pkg/karmadactl/cmdinit/kubernetes/deployments.go b/pkg/karmadactl/cmdinit/kubernetes/deployments.go index 7a6b487a6657..835ec04c263a 100644 --- a/pkg/karmadactl/cmdinit/kubernetes/deployments.go +++ b/pkg/karmadactl/cmdinit/kubernetes/deployments.go @@ -449,7 +449,7 @@ func (i *CommandInitOption) makeKarmadaSchedulerDeployment() *appsv1.Deployment Command: []string{ "/bin/karmada-scheduler", "--kubeconfig=/etc/kubeconfig", - "--metrics-bind-address=0.0.0.0:10351", + "--metrics-bind-address=0.0.0.0:8080", "--health-probe-bind-address=0.0.0.0:10351", "--enable-scheduler-estimator=true", "--leader-elect=true", @@ -463,7 +463,7 @@ func (i *CommandInitOption) makeKarmadaSchedulerDeployment() *appsv1.Deployment Ports: []corev1.ContainerPort{ { Name: metricsPortName, - ContainerPort: 10351, + ContainerPort: 8080, Protocol: corev1.ProtocolTCP, }, }, diff --git a/pkg/karmadactl/promote/promote.go b/pkg/karmadactl/promote/promote.go index 
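
Editorial note: the new preserveResourcesOnDeletion field appears in PropagationSpec, WorkSpec, and ResourceBindingSpec above. A minimal, hypothetical PropagationPolicy showing it together with conflictResolution: Overwrite, the combination the updated promote command applies further below; resource names are illustrative.

```yaml
# Hypothetical example only: names are illustrative.
apiVersion: policy.karmada.io/v1alpha1
kind: PropagationPolicy
metadata:
  name: nginx-propagation
  namespace: default
spec:
  resourceSelectors:
    - apiVersion: apps/v1
      kind: Deployment
      name: nginx
  placement:
    clusterAffinity:
      clusterNames:
        - member1
  # Take over a pre-existing resource in the member cluster instead of aborting.
  conflictResolution: Overwrite
  # Keep the propagated resources in member clusters if the resource template is deleted.
  preserveResourcesOnDeletion: true
```
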
ded8253f5bfa..2cb99167cce1 100644 --- a/pkg/karmadactl/promote/promote.go +++ b/pkg/karmadactl/promote/promote.go @@ -38,6 +38,7 @@ import ( "k8s.io/klog/v2" cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/util/templates" + "k8s.io/utils/ptr" configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" @@ -553,12 +554,14 @@ func (o *CommandPromoteOption) promote(controlPlaneRestConfig *rest.Config, obj if err != nil { return fmt.Errorf("failed to create resource %q(%s) in control plane: %v", gvr, o.name, err) } + fmt.Printf("ResourceTemplate (%s/%s) is created successfully\n", o.Namespace, o.name) if o.AutoCreatePolicy { - err = o.createClusterPropagationPolicy(karmadaClient, gvr, isDep) + policyName, err := o.createClusterPropagationPolicy(karmadaClient, gvr, isDep) if err != nil { return err } + fmt.Printf("ClusterPropagationPolicy %s is created successfully\n", policyName) } fmt.Printf("Resource %q(%s) is promoted successfully\n", gvr, o.name) @@ -578,12 +581,14 @@ func (o *CommandPromoteOption) promote(controlPlaneRestConfig *rest.Config, obj if err != nil { return fmt.Errorf("failed to create resource %q(%s/%s) in control plane: %v", gvr, o.Namespace, o.name, err) } + fmt.Printf("ResourceTemplate (%s/%s) is created successfully\n", o.Namespace, o.name) if o.AutoCreatePolicy { - err = o.createPropagationPolicy(karmadaClient, gvr, isDep) + policyName, err := o.createPropagationPolicy(karmadaClient, gvr, isDep) if err != nil { return err } + fmt.Printf("PropagationPolicy (%s/%s) is created successfully\n", o.Namespace, policyName) } fmt.Printf("Resource %q(%s/%s) is promoted successfully\n", gvr, o.Namespace, o.name) @@ -658,7 +663,7 @@ func (o *CommandPromoteOption) printObjectAndPolicy(obj *unstructured.Unstructur } // createPropagationPolicy create PropagationPolicy in karmada control plane -func (o *CommandPromoteOption) createPropagationPolicy(karmadaClient *karmadaclientset.Clientset, gvr schema.GroupVersionResource, isDep bool) error { +func (o *CommandPromoteOption) createPropagationPolicy(karmadaClient *karmadaclientset.Clientset, gvr schema.GroupVersionResource, isDep bool) (string, error) { var policyName string if o.PolicyName == "" { policyName = names.GeneratePolicyName(o.Namespace, o.name, o.gvk.String()) @@ -671,18 +676,18 @@ func (o *CommandPromoteOption) createPropagationPolicy(karmadaClient *karmadacli pp := buildPropagationPolicy(o.name, policyName, o.Namespace, o.Cluster, gvr, o.gvk, isDep) _, err = karmadaClient.PolicyV1alpha1().PropagationPolicies(o.Namespace).Create(context.TODO(), pp, metav1.CreateOptions{}) - return err + return policyName, err } if err != nil { - return fmt.Errorf("failed to get PropagationPolicy(%s/%s) in control plane: %v", o.Namespace, policyName, err) + return policyName, fmt.Errorf("failed to get PropagationPolicy(%s/%s) in control plane: %v", o.Namespace, policyName, err) } // PropagationPolicy already exists, not to create it - return fmt.Errorf("the PropagationPolicy(%s/%s) already exist, please edit it to propagate resource", o.Namespace, policyName) + return policyName, fmt.Errorf("the PropagationPolicy(%s/%s) already exist, please edit it to propagate resource", o.Namespace, policyName) } // createClusterPropagationPolicy create ClusterPropagationPolicy in karmada control plane -func (o *CommandPromoteOption) createClusterPropagationPolicy(karmadaClient *karmadaclientset.Clientset, gvr schema.GroupVersionResource, isDep bool) error { +func 
(o *CommandPromoteOption) createClusterPropagationPolicy(karmadaClient *karmadaclientset.Clientset, gvr schema.GroupVersionResource, isDep bool) (string, error) { var policyName string if o.PolicyName == "" { policyName = names.GeneratePolicyName("", o.name, o.gvk.String()) @@ -695,14 +700,14 @@ func (o *CommandPromoteOption) createClusterPropagationPolicy(karmadaClient *kar cpp := buildClusterPropagationPolicy(o.name, policyName, o.Cluster, gvr, o.gvk, isDep) _, err = karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Create(context.TODO(), cpp, metav1.CreateOptions{}) - return err + return policyName, err } if err != nil { - return fmt.Errorf("failed to get ClusterPropagationPolicy(%s) in control plane: %v", policyName, err) + return policyName, fmt.Errorf("failed to get ClusterPropagationPolicy(%s) in control plane: %v", policyName, err) } // ClusterPropagationPolicy already exists, not to create it - return fmt.Errorf("the ClusterPropagationPolicy(%s) already exist, please edit it to propagate resource", policyName) + return policyName, fmt.Errorf("the ClusterPropagationPolicy(%s) already exist, please edit it to propagate resource", policyName) } // preprocessResource delete redundant fields to convert resource as template @@ -750,6 +755,8 @@ func buildPropagationPolicy(resourceName, policyName, namespace, cluster string, ClusterNames: []string{cluster}, }, }, + ConflictResolution: policyv1alpha1.ConflictOverwrite, + PreserveResourcesOnDeletion: ptr.To[bool](true), }, } return pp @@ -775,6 +782,8 @@ func buildClusterPropagationPolicy(resourceName, policyName, cluster string, gvr ClusterNames: []string{cluster}, }, }, + ConflictResolution: policyv1alpha1.ConflictOverwrite, + PreserveResourcesOnDeletion: ptr.To[bool](true), }, } return cpp diff --git a/pkg/karmadactl/util/apiclient/apiclient.go b/pkg/karmadactl/util/apiclient/apiclient.go index d5194970f412..3dfe9432de29 100644 --- a/pkg/karmadactl/util/apiclient/apiclient.go +++ b/pkg/karmadactl/util/apiclient/apiclient.go @@ -98,7 +98,7 @@ func NewClientSet(c *rest.Config) (*kubernetes.Clientset, error) { } // NewCRDsClient is to create a clientset ClientSet -func NewCRDsClient(c *rest.Config) (*clientset.Clientset, error) { +func NewCRDsClient(c *rest.Config) (clientset.Interface, error) { return clientset.NewForConfig(c) } diff --git a/pkg/modeling/modeling.go b/pkg/modeling/modeling.go index 5564a1bebcb4..9e4842cae0a2 100644 --- a/pkg/modeling/modeling.go +++ b/pkg/modeling/modeling.go @@ -163,7 +163,7 @@ func (rs *ResourceSummary) clusterResourceNodeComparator(a, b interface{}) int { func (rs *ResourceSummary) AddToResourceSummary(crn ClusterResourceNode) { index := rs.getIndex(crn) if index == -1 { - klog.Error("ClusterResource can not add to resource summary: index is invalid.") + klog.Errorf("Failed to add node to resource summary due to no appropriate grade. 
ClusterResourceNode:%v", crn) return } modeling := &(*rs).RMs[index] diff --git a/pkg/resourceinterpreter/default/native/dependencies.go b/pkg/resourceinterpreter/default/native/dependencies.go index 94aaf2ba22fb..0e6095059672 100644 --- a/pkg/resourceinterpreter/default/native/dependencies.go +++ b/pkg/resourceinterpreter/default/native/dependencies.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" mcsv1alpha1 "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1" configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" @@ -132,7 +133,36 @@ func getStatefulSetDependencies(object *unstructured.Unstructured) ([]configv1al return nil, err } - return helper.GetDependenciesFromPodTemplate(podObj) + deps, err := helper.GetDependenciesFromPodTemplate(podObj) + if err != nil { + return nil, err + } + + if len(statefulSetObj.Spec.VolumeClaimTemplates) == 0 { + return deps, nil + } + + // ignore the PersistentVolumeClaim dependency if it was created by the StatefulSet VolumeClaimTemplates + // the PVC dependency is not needed because the StatefulSet will manage the pvcs in the member cluster, + // if it exists here it was just a placeholder not a real PVC + var validDeps []configv1alpha1.DependentObjectReference + volumeClaimTemplateNames := sets.Set[string]{} + for i := range statefulSetObj.Spec.VolumeClaimTemplates { + volumeClaimTemplateNames.Insert(statefulSetObj.Spec.VolumeClaimTemplates[i].Name) + } + + for i := range deps { + if deps[i].Kind != util.PersistentVolumeClaimKind { + validDeps = append(validDeps, deps[i]) + continue + } + if volumeClaimTemplateNames.Has(deps[i].Name) { + continue + } + validDeps = append(validDeps, deps[i]) + } + + return validDeps, nil } func getIngressDependencies(object *unstructured.Unstructured) ([]configv1alpha1.DependentObjectReference, error) { diff --git a/pkg/resourceinterpreter/default/native/dependencies_test.go b/pkg/resourceinterpreter/default/native/dependencies_test.go index d9d5d310f1d4..94b6dc57c342 100644 --- a/pkg/resourceinterpreter/default/native/dependencies_test.go +++ b/pkg/resourceinterpreter/default/native/dependencies_test.go @@ -861,6 +861,39 @@ func Test_getStatefulSetDependencies(t *testing.T) { want: testPairs[2].dependentObjectReference, wantErr: false, }, + { + name: "statefulset with partial dependencies 4", + object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "StatefulSet", + "metadata": map[string]interface{}{ + "name": "fake-statefulset", + "namespace": namespace, + }, + "spec": map[string]interface{}{ + "serviceName": "fake-service", + "selector": map[string]interface{}{ + "matchLabels": map[string]interface{}{ + "app": "fake", + }, + }, + "template": map[string]interface{}{ + "spec": testPairs[0].podSpecsWithDependencies.Object, + }, + "volumeClaimTemplates": []interface{}{ + map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test-pvc", + }, + }, + }, + }, + }, + }, + want: testPairs[0].dependentObjectReference[:3], // remove the pvc dependency because it was found in the volumeClaimTemplates + wantErr: false, + }, } for i := range tests { diff --git a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/apps.kruise.io/v1alpha1/DaemonSet/customizations.yaml b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/apps.kruise.io/v1alpha1/DaemonSet/customizations.yaml 
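
Editorial note: a hypothetical StatefulSet mirroring the new test case above, assuming the pod template declares a placeholder volume whose claimName matches a volumeClaimTemplate name; with the getStatefulSetDependencies change, that claim is no longer reported as a PersistentVolumeClaim dependency because the StatefulSet manages it in each member cluster.

```yaml
# Hypothetical example only: names are illustrative.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: web
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
        - name: web
          image: nginx
      volumes:
        # Placeholder reference; the claim name matches a volumeClaimTemplate,
        # so it is filtered out of the interpreted dependencies.
        - name: data
          persistentVolumeClaim:
            claimName: data
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi
```

A claim referenced by the pod template whose name does not appear in volumeClaimTemplates would still be returned as a dependency, as before.
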
index e01bbde7d282..8e4dd65cd984 100644 --- a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/apps.kruise.io/v1alpha1/DaemonSet/customizations.yaml +++ b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/apps.kruise.io/v1alpha1/DaemonSet/customizations.yaml @@ -1,166 +1,166 @@ -apiVersion: config.karmada.io/v1alpha1 -kind: ResourceInterpreterCustomization -metadata: - name: declarative-configuration-daemonset -spec: - target: - apiVersion: apps.kruise.io/v1alpha1 - kind: DaemonSet - customizations: - statusAggregation: - luaScript: > - function AggregateStatus(desiredObj, statusItems) - if desiredObj.status == nil then - desiredObj.status = {} - end - if desiredObj.metadata.generation == nil then - desiredObj.metadata.generation = 0 - end - - if desiredObj.status.observedGeneration == nil then - desiredObj.status.observedGeneration = 0 - end - - -- Initialize status fields if status doest not exist - -- If the DaemonSet is not spread to any cluster, its status also should be aggregated - if statusItems == nil then - desiredObj.status.observedGeneration = desiredObj.metadata.generation - desiredObj.status.currentNumberScheduled = 0 - desiredObj.status.numberMisscheduled = 0 - desiredObj.status.desiredNumberScheduled = 0 - desiredObj.status.numberReady = 0 - desiredObj.status.updatedNumberScheduled = 0 - desiredObj.status.numberAvailable = 0 - desiredObj.status.numberUnavailable = 0 - desiredObj.status.daemonSetHash = 0 - return desiredObj - end - - local generation = desiredObj.metadata.generation - local observedGeneration = desiredObj.status.observedGeneration - local currentNumberScheduled = 0 - local numberMisscheduled = 0 - local desiredNumberScheduled = 0 - local numberReady = 0 - local updatedNumberScheduled = 0 - local numberAvailable = 0 - local numberUnavailable = 0 - local daemonSetHash = 0 - - -- Count all members that their status is updated to the latest generation - local observedResourceTemplateGenerationCount = 0 - for i = 1, #statusItems do - if statusItems[i].status ~= nil and statusItems[i].status.currentNumberScheduled ~= nil then - currentNumberScheduled = currentNumberScheduled + statusItems[i].status.currentNumberScheduled - end - if statusItems[i].status ~= nil and statusItems[i].status.numberMisscheduled ~= nil then - numberMisscheduled = numberMisscheduled + statusItems[i].status.numberMisscheduled - end - if statusItems[i].status ~= nil and statusItems[i].status.desiredNumberScheduled ~= nil then - desiredNumberScheduled = desiredNumberScheduled + statusItems[i].status.desiredNumberScheduled - end - if statusItems[i].status ~= nil and statusItems[i].status.numberReady ~= nil then - numberReady = numberReady + statusItems[i].status.numberReady - end - if statusItems[i].status ~= nil and statusItems[i].status.updatedNumberScheduled ~= nil then - updatedNumberScheduled = updatedNumberScheduled + statusItems[i].status.updatedNumberScheduled - end - if statusItems[i].status ~= nil and statusItems[i].status.numberAvailable ~= nil then - numberAvailable = numberAvailable + statusItems[i].status.numberAvailable - end - if statusItems[i].status ~= nil and statusItems[i].status.numberUnavailable ~= nil then - numberUnavailable = numberUnavailable + statusItems[i].status.numberUnavailable - end - if statusItems[i].status ~= nil and statusItems[i].status.daemonSetHash ~= nil and statusItems[i].status.daemonSetHash ~= '' then - daemonSetHash = statusItems[i].status.daemonSetHash - end - - -- Check if the member's status is updated to the 
latest generation - local resourceTemplateGeneration = 0 - if statusItems[i].status ~= nil and statusItems[i].status.resourceTemplateGeneration ~= nil then - resourceTemplateGeneration = statusItems[i].status.resourceTemplateGeneration - end - local memberGeneration = 0 - if statusItems[i].status ~= nil and statusItems[i].status.generation ~= nil then - memberGeneration = statusItems[i].status.generation - end - local memberObservedGeneration = 0 - if statusItems[i].status ~= nil and statusItems[i].status.observedGeneration ~= nil then - memberObservedGeneration = statusItems[i].status.observedGeneration - end - if resourceTemplateGeneration == generation and memberGeneration == memberObservedGeneration then - observedResourceTemplateGenerationCount = observedResourceTemplateGenerationCount + 1 - end - end - - -- Update the observed generation based on the observedResourceTemplateGenerationCount - if observedResourceTemplateGenerationCount == #statusItems then - desiredObj.status.observedGeneration = generation - else - desiredObj.status.observedGeneration = observedGeneration - end - - desiredObj.status.currentNumberScheduled = currentNumberScheduled - desiredObj.status.numberMisscheduled = numberMisscheduled - desiredObj.status.desiredNumberScheduled = desiredNumberScheduled - desiredObj.status.numberReady = numberReady - desiredObj.status.updatedNumberScheduled = updatedNumberScheduled - desiredObj.status.numberAvailable = numberAvailable - desiredObj.status.numberUnavailable = numberUnavailable - desiredObj.status.daemonSetHash = daemonSetHash - return desiredObj - end - statusReflection: - luaScript: > - function ReflectStatus(observedObj) - local status = {} - if observedObj == nil or observedObj.status == nil then - return status - end - status.observedGeneration = observedObj.status.observedGeneration - status.currentNumberScheduled = observedObj.status.currentNumberScheduled - status.numberMisscheduled = observedObj.status.numberMisscheduled - status.desiredNumberScheduled = observedObj.status.desiredNumberScheduled - status.numberReady = observedObj.status.numberReady - status.updatedNumberScheduled = observedObj.status.updatedNumberScheduled - status.numberAvailable = observedObj.status.numberAvailable - status.numberUnavailable = observedObj.status.numberUnavailable - status.daemonSetHash = observedObj.status.daemonSetHash - - -- handle member resource generation report - if observedObj.metadata == nil then - return status - end - status.generation = observedObj.metadata.generation - - -- handle resource template generation report - if observedObj.metadata.annotations == nil then - return status - end - local resourceTemplateGeneration = tonumber(observedObj.metadata.annotations["resourcetemplate.karmada.io/generation"]) - if resourceTemplateGeneration ~= nil then - status.resourceTemplateGeneration = resourceTemplateGeneration - end - return status - end - healthInterpretation: - luaScript: > - function InterpretHealth(observedObj) - if observedObj.status.observedGeneration ~= observedObj.metadata.generation then - return false - end - if observedObj.status.updatedNumberScheduled < observedObj.status.desiredNumberScheduled then - return false - end - if observedObj.status.numberAvailable < observedObj.status.updatedNumberScheduled then - return false - end - return true - end - dependencyInterpretation: - luaScript: > - local kube = require("kube") - function GetDependencies(desiredObj) - refs = kube.getPodDependencies(desiredObj.spec.template, desiredObj.metadata.namespace) - 
return refs - end +apiVersion: config.karmada.io/v1alpha1 +kind: ResourceInterpreterCustomization +metadata: + name: declarative-configuration-daemonset +spec: + target: + apiVersion: apps.kruise.io/v1alpha1 + kind: DaemonSet + customizations: + statusAggregation: + luaScript: > + function AggregateStatus(desiredObj, statusItems) + if desiredObj.status == nil then + desiredObj.status = {} + end + if desiredObj.metadata.generation == nil then + desiredObj.metadata.generation = 0 + end + + if desiredObj.status.observedGeneration == nil then + desiredObj.status.observedGeneration = 0 + end + + -- Initialize status fields if status doest not exist + -- If the DaemonSet is not spread to any cluster, its status also should be aggregated + if statusItems == nil then + desiredObj.status.observedGeneration = desiredObj.metadata.generation + desiredObj.status.currentNumberScheduled = 0 + desiredObj.status.numberMisscheduled = 0 + desiredObj.status.desiredNumberScheduled = 0 + desiredObj.status.numberReady = 0 + desiredObj.status.updatedNumberScheduled = 0 + desiredObj.status.numberAvailable = 0 + desiredObj.status.numberUnavailable = 0 + desiredObj.status.daemonSetHash = 0 + return desiredObj + end + + local generation = desiredObj.metadata.generation + local observedGeneration = desiredObj.status.observedGeneration + local currentNumberScheduled = 0 + local numberMisscheduled = 0 + local desiredNumberScheduled = 0 + local numberReady = 0 + local updatedNumberScheduled = 0 + local numberAvailable = 0 + local numberUnavailable = 0 + local daemonSetHash = 0 + + -- Count all members that their status is updated to the latest generation + local observedResourceTemplateGenerationCount = 0 + for i = 1, #statusItems do + if statusItems[i].status ~= nil and statusItems[i].status.currentNumberScheduled ~= nil then + currentNumberScheduled = currentNumberScheduled + statusItems[i].status.currentNumberScheduled + end + if statusItems[i].status ~= nil and statusItems[i].status.numberMisscheduled ~= nil then + numberMisscheduled = numberMisscheduled + statusItems[i].status.numberMisscheduled + end + if statusItems[i].status ~= nil and statusItems[i].status.desiredNumberScheduled ~= nil then + desiredNumberScheduled = desiredNumberScheduled + statusItems[i].status.desiredNumberScheduled + end + if statusItems[i].status ~= nil and statusItems[i].status.numberReady ~= nil then + numberReady = numberReady + statusItems[i].status.numberReady + end + if statusItems[i].status ~= nil and statusItems[i].status.updatedNumberScheduled ~= nil then + updatedNumberScheduled = updatedNumberScheduled + statusItems[i].status.updatedNumberScheduled + end + if statusItems[i].status ~= nil and statusItems[i].status.numberAvailable ~= nil then + numberAvailable = numberAvailable + statusItems[i].status.numberAvailable + end + if statusItems[i].status ~= nil and statusItems[i].status.numberUnavailable ~= nil then + numberUnavailable = numberUnavailable + statusItems[i].status.numberUnavailable + end + if statusItems[i].status ~= nil and statusItems[i].status.daemonSetHash ~= nil and statusItems[i].status.daemonSetHash ~= '' then + daemonSetHash = statusItems[i].status.daemonSetHash + end + + -- Check if the member's status is updated to the latest generation + local resourceTemplateGeneration = 0 + if statusItems[i].status ~= nil and statusItems[i].status.resourceTemplateGeneration ~= nil then + resourceTemplateGeneration = statusItems[i].status.resourceTemplateGeneration + end + local memberGeneration = 0 + if statusItems[i].status 
~= nil and statusItems[i].status.generation ~= nil then + memberGeneration = statusItems[i].status.generation + end + local memberObservedGeneration = 0 + if statusItems[i].status ~= nil and statusItems[i].status.observedGeneration ~= nil then + memberObservedGeneration = statusItems[i].status.observedGeneration + end + if resourceTemplateGeneration == generation and memberGeneration == memberObservedGeneration then + observedResourceTemplateGenerationCount = observedResourceTemplateGenerationCount + 1 + end + end + + -- Update the observed generation based on the observedResourceTemplateGenerationCount + if observedResourceTemplateGenerationCount == #statusItems then + desiredObj.status.observedGeneration = generation + else + desiredObj.status.observedGeneration = observedGeneration + end + + desiredObj.status.currentNumberScheduled = currentNumberScheduled + desiredObj.status.numberMisscheduled = numberMisscheduled + desiredObj.status.desiredNumberScheduled = desiredNumberScheduled + desiredObj.status.numberReady = numberReady + desiredObj.status.updatedNumberScheduled = updatedNumberScheduled + desiredObj.status.numberAvailable = numberAvailable + desiredObj.status.numberUnavailable = numberUnavailable + desiredObj.status.daemonSetHash = daemonSetHash + return desiredObj + end + statusReflection: + luaScript: > + function ReflectStatus(observedObj) + local status = {} + if observedObj == nil or observedObj.status == nil then + return status + end + status.observedGeneration = observedObj.status.observedGeneration + status.currentNumberScheduled = observedObj.status.currentNumberScheduled + status.numberMisscheduled = observedObj.status.numberMisscheduled + status.desiredNumberScheduled = observedObj.status.desiredNumberScheduled + status.numberReady = observedObj.status.numberReady + status.updatedNumberScheduled = observedObj.status.updatedNumberScheduled + status.numberAvailable = observedObj.status.numberAvailable + status.numberUnavailable = observedObj.status.numberUnavailable + status.daemonSetHash = observedObj.status.daemonSetHash + + -- handle member resource generation report + if observedObj.metadata == nil then + return status + end + status.generation = observedObj.metadata.generation + + -- handle resource template generation report + if observedObj.metadata.annotations == nil then + return status + end + local resourceTemplateGeneration = tonumber(observedObj.metadata.annotations["resourcetemplate.karmada.io/generation"]) + if resourceTemplateGeneration ~= nil then + status.resourceTemplateGeneration = resourceTemplateGeneration + end + return status + end + healthInterpretation: + luaScript: > + function InterpretHealth(observedObj) + if observedObj.status.observedGeneration ~= observedObj.metadata.generation then + return false + end + if observedObj.status.updatedNumberScheduled < observedObj.status.desiredNumberScheduled then + return false + end + if observedObj.status.numberAvailable < observedObj.status.updatedNumberScheduled then + return false + end + return true + end + dependencyInterpretation: + luaScript: > + local kube = require("kube") + function GetDependencies(desiredObj) + refs = kube.getPodDependencies(desiredObj.spec.template, desiredObj.metadata.namespace) + return refs + end diff --git a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/desired-policy.yaml b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/desired-policy.yaml index f69acb336662..3f6fe0b0be33 
100644 --- a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/desired-policy.yaml +++ b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/desired-policy.yaml @@ -1,20 +1,20 @@ -apiVersion: kyverno.io/v1 -kind: Policy -metadata: - name: sample - namespace: test-policy -spec: - validationFailureAction: Enforce - rules: - - name: require-pod-purpose-label - match: - any: - - resources: - kinds: - - Pod - validate: - message: "You must have label `purpose` with value `production` set on all new Pod in test-policy Namespace." - pattern: - metadata: - labels: - purpose: production +apiVersion: kyverno.io/v1 +kind: Policy +metadata: + name: sample + namespace: test-policy +spec: + validationFailureAction: Enforce + rules: + - name: require-pod-purpose-label + match: + any: + - resources: + kinds: + - Pod + validate: + message: "You must have label `purpose` with value `production` set on all new Pod in test-policy Namespace." + pattern: + metadata: + labels: + purpose: production diff --git a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/observed-policy.yaml b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/observed-policy.yaml index b972ee9804c4..64050a5e4237 100644 --- a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/observed-policy.yaml +++ b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/observed-policy.yaml @@ -1,86 +1,86 @@ -apiVersion: kyverno.io/v1 -kind: Policy -metadata: - name: sample - namespace: test-policy -spec: - validationFailureAction: Enforce - rules: - - name: require-pod-purpose-label - match: - any: - - resources: - kinds: - - Pod - validate: - message: "You must have label `purpose` with value `production` set on all new Pod in test-policy Namespace." - pattern: - metadata: - labels: - purpose: production -status: - autogen: - rules: - - exclude: - resources: {} - generate: - clone: {} - cloneList: {} - match: - any: - - resources: - kinds: - - DaemonSet - - Deployment - - Job - - StatefulSet - - ReplicaSet - - ReplicationController - resources: {} - mutate: {} - name: autogen-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on all - new Pod in test-policy Namespace. - pattern: - spec: - template: - metadata: - labels: - purpose: production - - exclude: - resources: {} - generate: - clone: {} - cloneList: {} - match: - any: - - resources: - kinds: - - CronJob - resources: {} - mutate: {} - name: autogen-cronjob-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on all - new Pod in test-policy Namespace. - pattern: - spec: - jobTemplate: - spec: - template: - metadata: - labels: - purpose: production - conditions: - - lastTransitionTime: "2023-05-07T09:19:06Z" - message: "" - reason: Succeeded - status: "True" - type: Ready - ready: true - rulecount: - generate: 0 - mutate: 0 - validate: 1 - verifyimages: 0 +apiVersion: kyverno.io/v1 +kind: Policy +metadata: + name: sample + namespace: test-policy +spec: + validationFailureAction: Enforce + rules: + - name: require-pod-purpose-label + match: + any: + - resources: + kinds: + - Pod + validate: + message: "You must have label `purpose` with value `production` set on all new Pod in test-policy Namespace." 
+ pattern: + metadata: + labels: + purpose: production +status: + autogen: + rules: + - exclude: + resources: {} + generate: + clone: {} + cloneList: {} + match: + any: + - resources: + kinds: + - DaemonSet + - Deployment + - Job + - StatefulSet + - ReplicaSet + - ReplicationController + resources: {} + mutate: {} + name: autogen-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on all + new Pod in test-policy Namespace. + pattern: + spec: + template: + metadata: + labels: + purpose: production + - exclude: + resources: {} + generate: + clone: {} + cloneList: {} + match: + any: + - resources: + kinds: + - CronJob + resources: {} + mutate: {} + name: autogen-cronjob-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on all + new Pod in test-policy Namespace. + pattern: + spec: + jobTemplate: + spec: + template: + metadata: + labels: + purpose: production + conditions: + - lastTransitionTime: "2023-05-07T09:19:06Z" + message: "" + reason: Succeeded + status: "True" + type: Ready + ready: true + rulecount: + generate: 0 + mutate: 0 + validate: 1 + verifyimages: 0 diff --git a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/status-file.yaml b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/status-file.yaml index 17cda3cd42db..0f16a7b8ce39 100644 --- a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/status-file.yaml +++ b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/status-file.yaml @@ -1,119 +1,119 @@ -applied: true -clusterName: member2 -health: Healthy -status: - autogen: - rules: - - exclude: [] - generate: [] - match: - any: - - resources: - kinds: - - DaemonSet - - Deployment - - Job - - StatefulSet - - ReplicaSet - - ReplicationController - name: autogen-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on - all new Pod in test-policy Namespace. - pattern: - spec: - template: - metadata: - labels: - purpose: production - - exclude: [] - generate: [] - match: - any: - - resources: - kinds: - - CronJob - name: autogen-cronjob-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on - all new Pod in test-policy Namespace. - pattern: - spec: - jobTemplate: - spec: - template: - metadata: - labels: - purpose: production - conditions: - - lastTransitionTime: "2023-05-07T09:19:06Z" - message: "" - reason: Succeeded - status: "True" - type: Ready - ready: true - rulecount: - generate: 0 - mutate: 0 - validate: 1 - verifyimages: 0 ---- -applied: true -clusterName: member3 -health: Healthy -status: - autogen: - rules: - - exclude: [] - generate: [] - match: - any: - - resources: - kinds: - - DaemonSet - - Deployment - - Job - - StatefulSet - - ReplicaSet - - ReplicationController - name: autogen-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on - all new Pod in test-policy Namespace. - pattern: - spec: - template: - metadata: - labels: - purpose: production - - exclude: [] - generate: [] - match: - any: - - resources: - kinds: - - CronJob - name: autogen-cronjob-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on - all new Pod in test-policy Namespace. 
- pattern: - spec: - jobTemplate: - spec: - template: - metadata: - labels: - purpose: production - conditions: - - lastTransitionTime: "2023-05-07T09:19:06Z" - message: "" - reason: Succeeded - status: "True" - type: Ready - ready: true - rulecount: - generate: 0 - mutate: 0 - validate: 1 - verifyimages: 0 +applied: true +clusterName: member2 +health: Healthy +status: + autogen: + rules: + - exclude: [] + generate: [] + match: + any: + - resources: + kinds: + - DaemonSet + - Deployment + - Job + - StatefulSet + - ReplicaSet + - ReplicationController + name: autogen-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on + all new Pod in test-policy Namespace. + pattern: + spec: + template: + metadata: + labels: + purpose: production + - exclude: [] + generate: [] + match: + any: + - resources: + kinds: + - CronJob + name: autogen-cronjob-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on + all new Pod in test-policy Namespace. + pattern: + spec: + jobTemplate: + spec: + template: + metadata: + labels: + purpose: production + conditions: + - lastTransitionTime: "2023-05-07T09:19:06Z" + message: "" + reason: Succeeded + status: "True" + type: Ready + ready: true + rulecount: + generate: 0 + mutate: 0 + validate: 1 + verifyimages: 0 +--- +applied: true +clusterName: member3 +health: Healthy +status: + autogen: + rules: + - exclude: [] + generate: [] + match: + any: + - resources: + kinds: + - DaemonSet + - Deployment + - Job + - StatefulSet + - ReplicaSet + - ReplicationController + name: autogen-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on + all new Pod in test-policy Namespace. + pattern: + spec: + template: + metadata: + labels: + purpose: production + - exclude: [] + generate: [] + match: + any: + - resources: + kinds: + - CronJob + name: autogen-cronjob-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on + all new Pod in test-policy Namespace. + pattern: + spec: + jobTemplate: + spec: + template: + metadata: + labels: + purpose: production + conditions: + - lastTransitionTime: "2023-05-07T09:19:06Z" + message: "" + reason: Succeeded + status: "True" + type: Ready + ready: true + rulecount: + generate: 0 + mutate: 0 + validate: 1 + verifyimages: 0 diff --git a/pkg/scheduler/cache/cache_test.go b/pkg/scheduler/cache/cache_test.go new file mode 100644 index 000000000000..8694bd693e00 --- /dev/null +++ b/pkg/scheduler/cache/cache_test.go @@ -0,0 +1,256 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + clusterlister "github.com/karmada-io/karmada/pkg/generated/listers/cluster/v1alpha1" +) + +func TestNewCache(t *testing.T) { + tests := []struct { + name string + clusterLister clusterlister.ClusterLister + }{ + { + name: "Create cache with empty mock lister", + clusterLister: &mockClusterLister{}, + }, + { + name: "Create cache with non-empty mock lister", + clusterLister: &mockClusterLister{clusters: []*clusterv1alpha1.Cluster{{}}}, + }, + { + name: "Create cache with nil lister", + clusterLister: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cache := NewCache(tt.clusterLister) + + assert.NotNil(t, cache, "NewCache() returned nil cache") + + // Assert that the cache is of the correct type + sc, ok := cache.(*schedulerCache) + assert.True(t, ok, "NewCache() did not return a *schedulerCache") + + // Assert that the clusterLister is correctly set + assert.Equal(t, tt.clusterLister, sc.clusterLister, "clusterLister not set correctly") + }) + } +} + +func TestSnapshot(t *testing.T) { + tests := []struct { + name string + clusters []*clusterv1alpha1.Cluster + wantTotal int + wantReady int + wantReadyNames sets.Set[string] + }{ + { + name: "empty cluster list", + clusters: []*clusterv1alpha1.Cluster{}, + wantTotal: 0, + wantReady: 0, + wantReadyNames: sets.New[string](), + }, + { + name: "mixed ready and not ready clusters", + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}}}, + }, + }, + wantTotal: 3, + wantReady: 2, + wantReadyNames: sets.New("cluster1", "cluster3"), + }, + { + name: "all ready clusters", + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}}}, + }, + }, + wantTotal: 2, + wantReady: 2, + wantReadyNames: sets.New("cluster1", "cluster2"), + }, + { + name: "all not ready clusters", + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}}}, + }, + }, + wantTotal: 
2, + wantReady: 0, + wantReadyNames: sets.New[string](), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockLister := &mockClusterLister{clusters: tt.clusters} + cache := NewCache(mockLister) + snapshot := cache.Snapshot() + + assert.Equal(t, tt.wantTotal, snapshot.NumOfClusters(), "Incorrect number of total clusters") + assert.Equal(t, tt.wantTotal, len(snapshot.GetClusters()), "Incorrect number of clusters returned by GetClusters()") + assert.Equal(t, tt.wantReady, len(snapshot.GetReadyClusters()), "Incorrect number of ready clusters") + assert.Equal(t, tt.wantReadyNames, snapshot.GetReadyClusterNames(), "Incorrect ready cluster names") + + // Test GetCluster for existing clusters + for _, cluster := range tt.clusters { + gotCluster := snapshot.GetCluster(cluster.Name) + assert.NotNil(t, gotCluster, "GetCluster(%s) returned nil", cluster.Name) + assert.Equal(t, cluster, gotCluster.Cluster(), "GetCluster(%s) returned incorrect cluster", cluster.Name) + } + + // Test GetCluster for non-existent cluster + assert.Nil(t, snapshot.GetCluster("non-existent-cluster"), "GetCluster(non-existent-cluster) should return nil") + + // Verify that the snapshot is a deep copy + for i, cluster := range tt.clusters { + snapshotCluster := snapshot.GetClusters()[i].Cluster() + assert.Equal(t, cluster, snapshotCluster, "Snapshot cluster should be equal to original") + } + }) + } +} + +func TestNewEmptySnapshot(t *testing.T) { + snapshot := NewEmptySnapshot() + assert.Equal(t, 0, snapshot.NumOfClusters(), "New empty snapshot should have 0 clusters") + assert.Empty(t, snapshot.GetClusters(), "New empty snapshot should return empty cluster list") + assert.Empty(t, snapshot.GetReadyClusters(), "New empty snapshot should return empty ready cluster list") + assert.Empty(t, snapshot.GetReadyClusterNames(), "New empty snapshot should return empty ready cluster names") + assert.Nil(t, snapshot.GetCluster("any-cluster"), "GetCluster on empty snapshot should return nil") +} + +func TestAddUpdateDeleteCluster(t *testing.T) { + tests := []struct { + name string + action func(*schedulerCache, *clusterv1alpha1.Cluster) + }{ + { + name: "AddCluster", + action: func(cache *schedulerCache, cluster *clusterv1alpha1.Cluster) { + cache.AddCluster(cluster) + }, + }, + { + name: "UpdateCluster", + action: func(cache *schedulerCache, cluster *clusterv1alpha1.Cluster) { + cache.UpdateCluster(cluster) + }, + }, + { + name: "DeleteCluster", + action: func(cache *schedulerCache, cluster *clusterv1alpha1.Cluster) { + cache.DeleteCluster(cluster) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockLister := &mockClusterLister{} + cache := NewCache(mockLister).(*schedulerCache) + + cluster := &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + } + + tt.action(cache, cluster) + + // Verify that the action doesn't modify the cache + assert.Empty(t, mockLister.clusters, "SchedulerCache.%s() modified the cache, which it shouldn't", tt.name) + }) + } +} + +func TestSnapshotError(t *testing.T) { + // mock error + mockError := errors.New("mock list error") + + mockLister := &mockClusterLister{ + clusters: nil, + err: mockError, + } + + cache := NewCache(mockLister) + + snapshot := cache.Snapshot() + + // Assert that the snapshot is empty + assert.Equal(t, 0, snapshot.NumOfClusters(), "Snapshot should be empty when there's an error") + assert.Empty(t, snapshot.GetClusters(), "Snapshot should have no clusters when there's an error") +} + +// Mock 
Implementations + +type mockClusterLister struct { + clusters []*clusterv1alpha1.Cluster + err error +} + +func (m *mockClusterLister) List(_ labels.Selector) ([]*clusterv1alpha1.Cluster, error) { + return m.clusters, m.err +} + +func (m *mockClusterLister) Get(name string) (*clusterv1alpha1.Cluster, error) { + if m.err != nil { + return nil, m.err + } + for _, cluster := range m.clusters { + if cluster.Name == name { + return cluster, nil + } + } + return nil, nil +} diff --git a/pkg/scheduler/core/common_test.go b/pkg/scheduler/core/common_test.go new file mode 100644 index 000000000000..26fee080292e --- /dev/null +++ b/pkg/scheduler/core/common_test.go @@ -0,0 +1,222 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestSelectClusters(t *testing.T) { + tests := []struct { + name string + clustersScore framework.ClusterScoreList + placement *policyv1alpha1.Placement + spec *workv1alpha2.ResourceBindingSpec + expectedResult []*clusterv1alpha1.Cluster + expectedError bool + }{ + { + name: "select all clusters", + clustersScore: framework.ClusterScoreList{ + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, Score: 10}, + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}}, Score: 20}, + }, + placement: &policyv1alpha1.Placement{}, + spec: &workv1alpha2.ResourceBindingSpec{}, + expectedResult: []*clusterv1alpha1.Cluster{ + {ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}}, + }, + expectedError: false, + }, + { + name: "select top 1 cluster", + clustersScore: framework.ClusterScoreList{ + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, Score: 10}, + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}}, Score: 20}, + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}}, Score: 15}, + }, + placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1", "cluster2", "cluster3"}, + }, + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + { + SpreadByField: policyv1alpha1.SpreadByFieldCluster, + MaxGroups: 1, + }, + }, + }, + spec: &workv1alpha2.ResourceBindingSpec{}, + expectedResult: []*clusterv1alpha1.Cluster{ + {ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}}, + }, + expectedError: false, + }, + { + name: "select clusters with affinity", + clustersScore: framework.ClusterScoreList{ + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, Score: 
10}, + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}}, Score: 15}, + }, + placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1", "cluster3"}, + }, + }, + spec: &workv1alpha2.ResourceBindingSpec{}, + expectedResult: []*clusterv1alpha1.Cluster{ + {ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}}, + }, + expectedError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := SelectClusters(tt.clustersScore, tt.placement, tt.spec) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, len(tt.expectedResult), len(result)) + + expectedNames := make([]string, len(tt.expectedResult)) + for i, cluster := range tt.expectedResult { + expectedNames[i] = cluster.Name + } + + actualNames := make([]string, len(result)) + for i, cluster := range result { + actualNames[i] = cluster.Name + } + + assert.ElementsMatch(t, expectedNames, actualNames) + } + }) + } +} + +func TestAssignReplicas(t *testing.T) { + tests := []struct { + name string + clusters []*clusterv1alpha1.Cluster + spec *workv1alpha2.ResourceBindingSpec + status *workv1alpha2.ResourceBindingStatus + expectedResult []workv1alpha2.TargetCluster + expectedError bool + expectedErrMsg string + }{ + { + name: "Assign replicas to single cluster", + clusters: []*clusterv1alpha1.Cluster{ + {ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + }, + spec: &workv1alpha2.ResourceBindingSpec{ + Replicas: 3, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + }, + }, + }, + status: &workv1alpha2.ResourceBindingStatus{}, + expectedResult: []workv1alpha2.TargetCluster{{Name: "cluster1", Replicas: 3}}, + expectedError: false, + }, + { + name: "No clusters available", + clusters: []*clusterv1alpha1.Cluster{}, + spec: &workv1alpha2.ResourceBindingSpec{Replicas: 1}, + status: &workv1alpha2.ResourceBindingStatus{}, + expectedResult: nil, + expectedError: true, + expectedErrMsg: "no clusters available to schedule", + }, + { + name: "Non-workload scenario (zero replicas)", + clusters: []*clusterv1alpha1.Cluster{ + {ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}}, + }, + spec: &workv1alpha2.ResourceBindingSpec{ + Replicas: 0, + }, + status: &workv1alpha2.ResourceBindingStatus{}, + expectedResult: []workv1alpha2.TargetCluster{{Name: "cluster1"}, {Name: "cluster2"}}, + expectedError: false, + }, + { + name: "Unsupported replica scheduling strategy", + clusters: []*clusterv1alpha1.Cluster{ + {Spec: clusterv1alpha1.ClusterSpec{ID: "cluster1"}}, + }, + spec: &workv1alpha2.ResourceBindingSpec{ + Replicas: 3, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: "UnsupportedType", + ReplicaDivisionPreference: "UnsupportedPreference", + }, + }, + }, + status: &workv1alpha2.ResourceBindingStatus{}, + expectedResult: nil, + expectedError: true, + expectedErrMsg: "unsupported replica scheduling strategy", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := AssignReplicas(tt.clusters, tt.spec, tt.status) + + if tt.expectedError { + assert.Error(t, err) + if 
tt.expectedErrMsg != "" { + assert.Contains(t, err.Error(), tt.expectedErrMsg) + } + } else { + assert.NoError(t, err) + assert.Equal(t, len(tt.expectedResult), len(result)) + + // Check if the total assigned replicas match the spec + totalReplicas := int32(0) + for i, cluster := range result { + assert.Equal(t, tt.expectedResult[i].Name, cluster.Name) + assert.Equal(t, tt.expectedResult[i].Replicas, cluster.Replicas) + totalReplicas += cluster.Replicas + } + + if tt.spec.Replicas > 0 { + assert.Equal(t, tt.spec.Replicas, totalReplicas) + } + } + }) + } +} diff --git a/pkg/scheduler/event_handler_test.go b/pkg/scheduler/event_handler_test.go new file mode 100644 index 000000000000..fbf7088aeb7c --- /dev/null +++ b/pkg/scheduler/event_handler_test.go @@ -0,0 +1,453 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" +) + +func TestResourceBindingEventFilter(t *testing.T) { + testCases := []struct { + name string + schedulerName string + obj interface{} + expectedResult bool + }{ + { + name: "ResourceBinding: Matching scheduler name, no labels", + schedulerName: "test-scheduler", + obj: createResourceBinding("test-rb", "test-scheduler", nil), + expectedResult: false, + }, + { + name: "ResourceBinding: Non-matching scheduler name", + schedulerName: "test-scheduler", + obj: createResourceBinding("test-rb", "other-scheduler", nil), + expectedResult: false, + }, + { + name: "ResourceBinding: Matching scheduler name, with PropagationPolicyPermanentIDLabel", + schedulerName: "test-scheduler", + obj: createResourceBinding("test-rb", "test-scheduler", map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "test-id", + }), + expectedResult: true, + }, + { + name: "ResourceBinding: Matching scheduler name, with ClusterPropagationPolicyPermanentIDLabel", + schedulerName: "test-scheduler", + obj: createResourceBinding("test-rb", "test-scheduler", map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "test-id", + }), + expectedResult: true, + }, + { + name: "ResourceBinding: Matching scheduler name, with BindingManagedByLabel", + schedulerName: "test-scheduler", + obj: createResourceBinding("test-rb", "test-scheduler", map[string]string{ + workv1alpha2.BindingManagedByLabel: "test-manager", + }), + expectedResult: true, + }, + { + name: "ResourceBinding: Matching scheduler name, with empty PropagationPolicyPermanentIDLabel", + schedulerName: "test-scheduler", + obj: createResourceBinding("test-rb", "test-scheduler", map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "", + }), + expectedResult: false, + 
}, + { + name: "ClusterResourceBinding: Matching scheduler name, no labels", + schedulerName: "test-scheduler", + obj: createClusterResourceBinding("test-crb", "test-scheduler", nil), + expectedResult: false, + }, + { + name: "ClusterResourceBinding: Non-matching scheduler name", + schedulerName: "test-scheduler", + obj: createClusterResourceBinding("test-crb", "other-scheduler", nil), + expectedResult: false, + }, + { + name: "ClusterResourceBinding: Matching scheduler name, with ClusterPropagationPolicyPermanentIDLabel", + schedulerName: "test-scheduler", + obj: createClusterResourceBinding("test-crb", "test-scheduler", map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "test-id", + }), + expectedResult: true, + }, + { + name: "Nil object", + schedulerName: "test-scheduler", + obj: nil, + expectedResult: false, + }, + { + name: "Invalid object type", + schedulerName: "test-scheduler", + obj: "not-a-valid-object", + expectedResult: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + s := &Scheduler{ + schedulerName: tc.schedulerName, + } + result := s.resourceBindingEventFilter(tc.obj) + assert.Equal(t, tc.expectedResult, result, "Test case: %s", tc.name) + }) + } +} + +func TestAddCluster(t *testing.T) { + tests := []struct { + name string + enableSchedulerEstimator bool + obj interface{} + expectedAdded bool + expectedClusterName string + }{ + { + name: "valid cluster object with estimator enabled", + enableSchedulerEstimator: true, + obj: createCluster("test-cluster", 0, nil), + expectedAdded: true, + expectedClusterName: "test-cluster", + }, + { + name: "valid cluster object with estimator disabled", + enableSchedulerEstimator: false, + obj: createCluster("test-cluster-2", 0, nil), + expectedAdded: false, + expectedClusterName: "", + }, + { + name: "invalid object type", + enableSchedulerEstimator: true, + obj: &corev1.Pod{}, + expectedAdded: false, + expectedClusterName: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + s := &Scheduler{ + enableSchedulerEstimator: tt.enableSchedulerEstimator, + schedulerEstimatorWorker: mockWorker, + } + + s.addCluster(tt.obj) + + if tt.expectedAdded { + assert.Equal(t, 1, mockWorker.addCount, "Worker Add should have been called once") + assert.Equal(t, tt.expectedClusterName, mockWorker.lastAdded, "Incorrect cluster name added") + } else { + assert.Equal(t, 0, mockWorker.addCount, "Worker Add should not have been called") + assert.Nil(t, mockWorker.lastAdded, "No cluster name should have been added") + } + + assert.Equal(t, 0, mockWorker.enqueueCount, "Worker Enqueue should not have been called") + assert.Nil(t, mockWorker.lastEnqueued, "No item should have been enqueued") + }) + } +} + +func TestUpdateCluster(t *testing.T) { + tests := []struct { + name string + enableSchedulerEstimator bool + oldObj interface{} + newObj interface{} + expectedEstimatorAdded bool + expectedReconcileAdded int + }{ + { + name: "valid cluster update with generation change", + enableSchedulerEstimator: true, + oldObj: createCluster("test-cluster", 1, nil), + newObj: createCluster("test-cluster", 2, nil), + expectedEstimatorAdded: true, + expectedReconcileAdded: 2, + }, + { + name: "valid cluster update with label change", + enableSchedulerEstimator: true, + oldObj: createCluster("test-cluster", 0, map[string]string{"old": "label"}), + newObj: createCluster("test-cluster", 0, map[string]string{"new": "label"}), + expectedEstimatorAdded: 
true, + expectedReconcileAdded: 2, + }, + { + name: "valid cluster update without changes", + enableSchedulerEstimator: true, + oldObj: createCluster("test-cluster", 0, nil), + newObj: createCluster("test-cluster", 0, nil), + expectedEstimatorAdded: true, + expectedReconcileAdded: 0, + }, + { + name: "invalid old object type", + enableSchedulerEstimator: true, + oldObj: &corev1.Pod{}, + newObj: createCluster("test-cluster", 0, nil), + expectedEstimatorAdded: false, + expectedReconcileAdded: 0, + }, + { + name: "invalid new object type", + enableSchedulerEstimator: true, + oldObj: createCluster("test-cluster", 0, nil), + newObj: &corev1.Pod{}, + expectedEstimatorAdded: false, + expectedReconcileAdded: 0, + }, + { + name: "both objects invalid", + enableSchedulerEstimator: true, + oldObj: &corev1.Pod{}, + newObj: &corev1.Pod{}, + expectedEstimatorAdded: false, + expectedReconcileAdded: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + estimatorWorker := &mockAsyncWorker{} + reconcileWorker := &mockAsyncWorker{} + s := &Scheduler{ + enableSchedulerEstimator: tt.enableSchedulerEstimator, + schedulerEstimatorWorker: estimatorWorker, + clusterReconcileWorker: reconcileWorker, + } + + s.updateCluster(tt.oldObj, tt.newObj) + + // Check schedulerEstimatorWorker + if tt.expectedEstimatorAdded { + assert.Equal(t, 1, estimatorWorker.addCount, "Estimator worker Add should have been called once") + if cluster, ok := tt.newObj.(*clusterv1alpha1.Cluster); ok { + assert.Equal(t, cluster.Name, estimatorWorker.lastAdded, "Incorrect cluster name added to estimator worker") + } else { + t.Errorf("Expected newObj to be a Cluster, but it wasn't") + } + } else { + assert.Equal(t, 0, estimatorWorker.addCount, "Estimator worker Add should not have been called") + assert.Nil(t, estimatorWorker.lastAdded, "No cluster should have been added to estimator worker") + } + + // Check clusterReconcileWorker + assert.Equal(t, tt.expectedReconcileAdded, reconcileWorker.addCount, "Reconcile worker Add called unexpected number of times") + + if tt.expectedReconcileAdded > 0 { + lastAdded, ok := reconcileWorker.lastAdded.(*clusterv1alpha1.Cluster) + assert.True(t, ok, "Last added item is not a Cluster object") + if ok { + newCluster, newOk := tt.newObj.(*clusterv1alpha1.Cluster) + assert.True(t, newOk, "newObj is not a Cluster object") + if newOk { + assert.Equal(t, newCluster.Name, lastAdded.Name, "Incorrect cluster added to reconcile worker") + } + } + } else { + assert.Nil(t, reconcileWorker.lastAdded, "No cluster should have been added to reconcile worker") + } + }) + } +} + +func TestDeleteCluster(t *testing.T) { + tests := []struct { + name string + enableSchedulerEstimator bool + obj interface{} + expectedAdded bool + expectedClusterName string + }{ + { + name: "valid cluster object with estimator enabled", + enableSchedulerEstimator: true, + obj: createCluster("test-cluster", 0, nil), + expectedAdded: true, + expectedClusterName: "test-cluster", + }, + { + name: "valid cluster object with estimator disabled", + enableSchedulerEstimator: false, + obj: createCluster("test-cluster", 0, nil), + expectedAdded: false, + expectedClusterName: "", + }, + { + name: "deleted final state unknown with valid cluster", + enableSchedulerEstimator: true, + obj: cache.DeletedFinalStateUnknown{ + Key: "test-cluster", + Obj: createCluster("test-cluster", 0, nil), + }, + expectedAdded: true, + expectedClusterName: "test-cluster", + }, + { + name: "deleted final state unknown with invalid object", + 
enableSchedulerEstimator: true, + obj: cache.DeletedFinalStateUnknown{ + Key: "test-pod", + Obj: &corev1.Pod{}, + }, + expectedAdded: false, + expectedClusterName: "", + }, + { + name: "invalid object type", + enableSchedulerEstimator: true, + obj: &corev1.Pod{}, + expectedAdded: false, + expectedClusterName: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + worker := &mockAsyncWorker{} + s := &Scheduler{ + enableSchedulerEstimator: tt.enableSchedulerEstimator, + schedulerEstimatorWorker: worker, + } + + s.deleteCluster(tt.obj) + + if tt.expectedAdded { + assert.Equal(t, 1, worker.addCount, "Worker Add should have been called once") + assert.Equal(t, tt.expectedClusterName, worker.lastAdded, "Incorrect cluster name added to worker") + } else { + assert.Equal(t, 0, worker.addCount, "Worker Add should not have been called") + assert.Nil(t, worker.lastAdded, "No cluster name should have been added") + } + }) + } +} + +func TestSchedulerNameFilter(t *testing.T) { + tests := []struct { + name string + schedulerNameFromOptions string + schedulerName string + expected bool + }{ + { + name: "matching scheduler names", + schedulerNameFromOptions: "test-scheduler", + schedulerName: "test-scheduler", + expected: true, + }, + { + name: "non-matching scheduler names", + schedulerNameFromOptions: "test-scheduler", + schedulerName: "other-scheduler", + expected: false, + }, + { + name: "empty scheduler name defaults to DefaultScheduler", + schedulerNameFromOptions: DefaultScheduler, + schedulerName: "", + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := schedulerNameFilter(tt.schedulerNameFromOptions, tt.schedulerName) + assert.Equal(t, tt.expected, result) + }) + } +} + +// Helper functions + +func createCluster(name string, generation int64, labels map[string]string) *clusterv1alpha1.Cluster { + return &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Generation: generation, + Labels: labels, + }, + } +} + +func createResourceBinding(name, schedulerName string, labels map[string]string) *workv1alpha2.ResourceBinding { + return &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + SchedulerName: schedulerName, + }, + } +} + +func createClusterResourceBinding(name, schedulerName string, labels map[string]string) *workv1alpha2.ClusterResourceBinding { + return &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + SchedulerName: schedulerName, + }, + } +} + +// Mock Implementations + +// mockAsyncWorker is a mock implementation of util.AsyncWorker +type mockAsyncWorker struct { + addCount int + enqueueCount int + lastAdded interface{} + lastEnqueued interface{} +} + +func (m *mockAsyncWorker) Add(item interface{}) { + m.addCount++ + m.lastAdded = item +} + +func (m *mockAsyncWorker) Enqueue(item interface{}) { + m.enqueueCount++ + m.lastEnqueued = item +} + +func (m *mockAsyncWorker) AddAfter(_ interface{}, _ time.Duration) {} + +func (m *mockAsyncWorker) Run(_ int, _ <-chan struct{}) {} diff --git a/pkg/scheduler/framework/interface_test.go b/pkg/scheduler/framework/interface_test.go new file mode 100644 index 000000000000..6c4837568c0e --- /dev/null +++ b/pkg/scheduler/framework/interface_test.go @@ -0,0 +1,260 @@ +/* +Copyright 2024 The Karmada Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "errors" + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPluginToResult_Merge(t *testing.T) { + tests := []struct { + name string + results PluginToResult + want *Result + }{ + { + name: "empty results", + results: PluginToResult{}, + want: nil, + }, + { + name: "all success results", + results: PluginToResult{ + "plugin1": NewResult(Success), + "plugin2": NewResult(Success), + }, + want: NewResult(Success), + }, + { + name: "mixed results with unschedulable", + results: PluginToResult{ + "plugin1": NewResult(Success), + "plugin2": NewResult(Unschedulable, "reason1"), + "plugin3": NewResult(Success), + }, + want: NewResult(Unschedulable, "reason1"), + }, + { + name: "mixed results with error", + results: PluginToResult{ + "plugin1": NewResult(Success), + "plugin2": NewResult(Unschedulable, "reason1"), + "plugin3": NewResult(Error, "error occurred"), + }, + want: NewResult(Error, "reason1", "error occurred"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.results.Merge() + if tt.want == nil { + assert.Nil(t, got) + } else { + assert.NotNil(t, got) + assert.Equal(t, tt.want.code, got.code) + + // Sort the reasons before comparing + sortedWantReasons := make([]string, len(tt.want.reasons)) + copy(sortedWantReasons, tt.want.reasons) + sort.Strings(sortedWantReasons) + + sortedGotReasons := make([]string, len(got.reasons)) + copy(sortedGotReasons, got.reasons) + sort.Strings(sortedGotReasons) + + assert.Equal(t, sortedWantReasons, sortedGotReasons) + + if tt.want.err != nil { + assert.Error(t, got.err) + } else { + assert.NoError(t, got.err) + } + } + }) + } +} + +func TestResult_IsSuccess(t *testing.T) { + tests := []struct { + name string + result *Result + want bool + }{ + { + name: "nil result", + result: nil, + want: true, + }, + { + name: "success result", + result: NewResult(Success), + want: true, + }, + { + name: "unschedulable result", + result: NewResult(Unschedulable), + want: false, + }, + { + name: "error result", + result: NewResult(Error), + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, tt.result.IsSuccess()) + }) + } +} + +func TestResult_AsError(t *testing.T) { + tests := []struct { + name string + result *Result + wantErr bool + errorMsg string + }{ + { + name: "success result", + result: NewResult(Success), + wantErr: false, + }, + { + name: "unschedulable result", + result: NewResult(Unschedulable, "reason1", "reason2"), + wantErr: true, + errorMsg: "reason1, reason2", + }, + { + name: "error result", + result: NewResult(Error, "error occurred"), + wantErr: true, + errorMsg: "error occurred", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.result.AsError() + if tt.wantErr { + assert.Error(t, err) + assert.Equal(t, tt.errorMsg, err.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestResult_AsResult(t 
*testing.T) { + tests := []struct { + name string + err error + wantCode Code + wantReasons []string + }{ + { + name: "non-nil error", + err: errors.New("test error"), + wantCode: Error, + wantReasons: []string{"test error"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := AsResult(tt.err) + assert.Equal(t, tt.wantCode, got.code) + assert.Equal(t, tt.wantReasons, got.reasons) + assert.Equal(t, tt.err, got.err) + }) + } +} + +func TestResult_Code(t *testing.T) { + tests := []struct { + name string + result *Result + want Code + }{ + { + name: "nil result", + result: nil, + want: Success, + }, + { + name: "success result", + result: NewResult(Success), + want: Success, + }, + { + name: "unschedulable result", + result: NewResult(Unschedulable), + want: Unschedulable, + }, + { + name: "error result", + result: NewResult(Error), + want: Error, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.result.Code() + assert.Equal(t, tt.want, got) + }) + } +} + +func TestCode_String(t *testing.T) { + tests := []struct { + name string + code Code + want string + }{ + { + name: "Success code", + code: Success, + want: "Success", + }, + { + name: "Unschedulable code", + code: Unschedulable, + want: "Unschedulable", + }, + { + name: "Error code", + code: Error, + want: "Error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.code.String() + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/pkg/scheduler/framework/plugins/apienablement/api_enablement_test.go b/pkg/scheduler/framework/plugins/apienablement/api_enablement_test.go new file mode 100644 index 000000000000..cf68ae0c0bac --- /dev/null +++ b/pkg/scheduler/framework/plugins/apienablement/api_enablement_test.go @@ -0,0 +1,147 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apienablement + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestAPIEnablement_Filter(t *testing.T) { + tests := []struct { + name string + bindingSpec *workv1alpha2.ResourceBindingSpec + cluster *clusterv1alpha1.Cluster + expectedCode framework.Code + expectError bool + }{ + { + name: "API is enabled in cluster", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + Status: clusterv1alpha1.ClusterStatus{ + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "apps/v1", + Resources: []clusterv1alpha1.APIResource{ + { + Kind: "Deployment", + }, + }, + }, + }, + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + { + name: "API is not enabled in cluster", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "custom.io/v1", + Kind: "CustomResource", + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + Status: clusterv1alpha1.ClusterStatus{ + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "apps/v1", + Resources: []clusterv1alpha1.APIResource{ + { + Kind: "Deployment", + }, + }, + }, + }, + }, + }, + expectedCode: framework.Unschedulable, + expectError: true, + }, + { + name: "cluster in target list with incomplete API enablements", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "custom.io/v1", + Kind: "CustomResource", + }, + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "cluster1", + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionCompleteAPIEnablements, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + } + + p := &APIEnablement{} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := p.Filter(context.Background(), tt.bindingSpec, nil, tt.cluster) + assert.Equal(t, tt.expectedCode, result.Code()) + assert.Equal(t, tt.expectError, result.AsError() != nil) + }) + } +} + +func TestNew(t *testing.T) { + plugin, err := New() + assert.NoError(t, err) + assert.NotNil(t, plugin) + _, ok := plugin.(*APIEnablement) + assert.True(t, ok) +} + +func TestAPIEnablement_Name(t *testing.T) { + p := &APIEnablement{} + assert.Equal(t, Name, p.Name()) +} diff --git a/pkg/scheduler/framework/plugins/clusteraffinity/cluster_affinity_test.go b/pkg/scheduler/framework/plugins/clusteraffinity/cluster_affinity_test.go new file mode 100644 index 000000000000..f18ff25e6e9d --- /dev/null +++ b/pkg/scheduler/framework/plugins/clusteraffinity/cluster_affinity_test.go @@ -0,0 +1,160 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusteraffinity + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestClusterAffinity_Filter(t *testing.T) { + tests := []struct { + name string + bindingSpec *workv1alpha2.ResourceBindingSpec + bindingStatus *workv1alpha2.ResourceBindingStatus + cluster *clusterv1alpha1.Cluster + expectedCode framework.Code + expectError bool + }{ + { + name: "matching affinity", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + { + name: "non-matching affinity", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster2"}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Unschedulable, + expectError: true, + }, + { + name: "matching affinity from ClusterAffinities", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{ + { + AffinityName: "affinity1", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + }, + }, + }, + bindingStatus: &workv1alpha2.ResourceBindingStatus{ + SchedulerObservedAffinityName: "affinity1", + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + { + name: "no affinity specified", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{}, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + } + + p := &ClusterAffinity{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := p.Filter(context.Background(), tt.bindingSpec, tt.bindingStatus, tt.cluster) + assert.Equal(t, tt.expectedCode, result.Code()) + assert.Equal(t, tt.expectError, result.AsError() != nil) + }) + } +} + +func TestClusterAffinity_Score(t *testing.T) { + p := &ClusterAffinity{} + spec := &workv1alpha2.ResourceBindingSpec{} + cluster := &clusterv1alpha1.Cluster{} + + score, result := p.Score(context.Background(), spec, cluster) + + assert.Equal(t, framework.MinClusterScore, score) + assert.Equal(t, framework.Success, result.Code()) +} + +func TestClusterAffinity_ScoreExtensions(t *testing.T) { + p := 
&ClusterAffinity{} + assert.Equal(t, p, p.ScoreExtensions()) +} + +func TestClusterAffinity_NormalizeScore(t *testing.T) { + p := &ClusterAffinity{} + result := p.NormalizeScore(context.Background(), nil) + assert.Equal(t, framework.Success, result.Code()) +} + +func TestNew(t *testing.T) { + plugin, err := New() + + assert.NoError(t, err) + assert.NotNil(t, plugin) + _, ok := plugin.(*ClusterAffinity) + assert.True(t, ok) +} + +func TestClusterAffinity_Name(t *testing.T) { + p := &ClusterAffinity{} + assert.Equal(t, Name, p.Name()) +} diff --git a/pkg/scheduler/framework/plugins/clustereviction/cluster_eviction_test.go b/pkg/scheduler/framework/plugins/clustereviction/cluster_eviction_test.go new file mode 100644 index 000000000000..18111cb1927b --- /dev/null +++ b/pkg/scheduler/framework/plugins/clustereviction/cluster_eviction_test.go @@ -0,0 +1,107 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clustereviction + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestClusterEviction_Filter(t *testing.T) { + tests := []struct { + name string + bindingSpec *workv1alpha2.ResourceBindingSpec + cluster *clusterv1alpha1.Cluster + expectedCode framework.Code + expectError bool + }{ + { + name: "cluster is in graceful eviction tasks", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "cluster1", + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Unschedulable, + expectError: true, + }, + { + name: "cluster is not in graceful eviction tasks", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "cluster2", + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + { + name: "no graceful eviction tasks", + bindingSpec: &workv1alpha2.ResourceBindingSpec{}, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + } + + p := &ClusterEviction{} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := p.Filter(context.Background(), tt.bindingSpec, nil, tt.cluster) + assert.Equal(t, tt.expectedCode, result.Code()) + assert.Equal(t, tt.expectError, result.AsError() != nil) + }) + } +} + +func TestNew(t *testing.T) { + plugin, err := New() + assert.NoError(t, err) + assert.NotNil(t, plugin) + _, ok := plugin.(*ClusterEviction) + assert.True(t, ok) +} + +func TestClusterEviction_Name(t 
*testing.T) { + p := &ClusterEviction{} + assert.Equal(t, Name, p.Name()) +} diff --git a/pkg/scheduler/framework/plugins/clusterlocality/cluster_locality_test.go b/pkg/scheduler/framework/plugins/clusterlocality/cluster_locality_test.go new file mode 100644 index 000000000000..6ed32ec73c9a --- /dev/null +++ b/pkg/scheduler/framework/plugins/clusterlocality/cluster_locality_test.go @@ -0,0 +1,109 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterlocality + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestClusterLocality_Score(t *testing.T) { + tests := []struct { + name string + bindingSpec *workv1alpha2.ResourceBindingSpec + cluster *clusterv1alpha1.Cluster + expectedScore int64 + }{ + { + name: "no clusters in spec", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{}, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedScore: framework.MinClusterScore, + }, + { + name: "cluster in spec", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + {Name: "cluster2"}, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedScore: framework.MaxClusterScore, + }, + { + name: "cluster not in spec", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster2"}, + {Name: "cluster3"}, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedScore: framework.MinClusterScore, + }, + } + + p := &ClusterLocality{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + score, result := p.Score(context.Background(), tt.bindingSpec, tt.cluster) + assert.Equal(t, tt.expectedScore, score) + assert.Equal(t, framework.Success, result.Code()) + }) + } +} + +func TestNew(t *testing.T) { + plugin, err := New() + assert.NoError(t, err) + assert.NotNil(t, plugin) + _, ok := plugin.(*ClusterLocality) + assert.True(t, ok) +} + +func TestClusterLocality_Name(t *testing.T) { + p := &ClusterLocality{} + assert.Equal(t, Name, p.Name()) +} + +func TestClusterLocality_ScoreExtensions(t *testing.T) { + p := &ClusterLocality{} + assert.Nil(t, p.ScoreExtensions()) +} diff --git a/pkg/scheduler/framework/plugins/spreadconstraint/spread_constraint_test.go b/pkg/scheduler/framework/plugins/spreadconstraint/spread_constraint_test.go new file mode 100644 index 000000000000..771171a17178 --- /dev/null +++ b/pkg/scheduler/framework/plugins/spreadconstraint/spread_constraint_test.go @@ -0,0 +1,160 @@ +/* +Copyright 2024 The Karmada Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spreadconstraint + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestSpreadConstraint_Filter(t *testing.T) { + tests := []struct { + name string + bindingSpec *workv1alpha2.ResourceBindingSpec + cluster *clusterv1alpha1.Cluster + expectedCode framework.Code + expectedReason string + }{ + { + name: "no spread constraints", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{}, + }, + cluster: &clusterv1alpha1.Cluster{}, + expectedCode: framework.Success, + }, + { + name: "spread by provider - provider present", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByField: policyv1alpha1.SpreadByFieldProvider}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + Provider: "aws", + }, + }, + expectedCode: framework.Success, + }, + { + name: "spread by provider - provider missing", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByField: policyv1alpha1.SpreadByFieldProvider}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{}, + expectedCode: framework.Unschedulable, + expectedReason: "cluster(s) did not have provider property", + }, + { + name: "spread by region - region present", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByField: policyv1alpha1.SpreadByFieldRegion}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + Region: "us-west-2", + }, + }, + expectedCode: framework.Success, + }, + { + name: "spread by region - region missing", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByField: policyv1alpha1.SpreadByFieldRegion}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{}, + expectedCode: framework.Unschedulable, + expectedReason: "cluster(s) did not have region property", + }, + { + name: "spread by zone - zones present", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByField: policyv1alpha1.SpreadByFieldZone}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + Zones: []string{"us-west-2a"}, + }, + }, + expectedCode: framework.Success, + }, + { + name: "spread by zone - zones missing", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + 
Placement: &policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByField: policyv1alpha1.SpreadByFieldZone}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{}, + expectedCode: framework.Unschedulable, + expectedReason: "cluster(s) did not have zones property", + }, + } + + p := &SpreadConstraint{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := p.Filter(context.Background(), tt.bindingSpec, nil, tt.cluster) + assert.Equal(t, tt.expectedCode, result.Code()) + if tt.expectedReason != "" { + assert.Contains(t, result.AsError().Error(), tt.expectedReason) + } + }) + } +} + +func TestNew(t *testing.T) { + plugin, err := New() + assert.NoError(t, err) + assert.NotNil(t, plugin) + _, ok := plugin.(*SpreadConstraint) + assert.True(t, ok) +} + +func TestSpreadConstraint_Name(t *testing.T) { + p := &SpreadConstraint{} + assert.Equal(t, Name, p.Name()) +} diff --git a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go new file mode 100644 index 000000000000..05a39c6e2d51 --- /dev/null +++ b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go @@ -0,0 +1,135 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tainttoleration + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestTaintToleration_Filter(t *testing.T) { + tests := []struct { + name string + bindingSpec *workv1alpha2.ResourceBindingSpec + cluster *clusterv1alpha1.Cluster + expectedCode framework.Code + expectedReason string + }{ + { + name: "cluster already in target clusters", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Success, + }, + { + name: "no taints", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{}, + }, + cluster: &clusterv1alpha1.Cluster{}, + expectedCode: framework.Success, + }, + { + name: "tolerated taint", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterTolerations: []corev1.Toleration{ + { + Key: "key1", + Operator: corev1.TolerationOpEqual, + Value: "value1", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + Taints: []corev1.Taint{ + { + Key: "key1", + Value: "value1", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + }, + expectedCode: framework.Success, + }, + { + name: "untolerated taint", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{}, + }, + cluster: &clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + Taints: []corev1.Taint{ + { + Key: "key1", + Value: "value1", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + }, + expectedCode: framework.Unschedulable, + expectedReason: "cluster(s) had untolerated taint {key1=value1:NoSchedule}", + }, + } + + p := &TaintToleration{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := p.Filter(context.Background(), tt.bindingSpec, nil, tt.cluster) + assert.Equal(t, tt.expectedCode, result.Code()) + if tt.expectedReason != "" { + assert.Contains(t, result.AsError().Error(), tt.expectedReason) + } + }) + } +} + +func TestNew(t *testing.T) { + plugin, err := New() + assert.NoError(t, err) + assert.NotNil(t, plugin) + _, ok := plugin.(*TaintToleration) + assert.True(t, ok) +} + +func TestTaintToleration_Name(t *testing.T) { + p := &TaintToleration{} + assert.Equal(t, Name, p.Name()) +} diff --git a/pkg/scheduler/framework/types_test.go b/pkg/scheduler/framework/types_test.go new file mode 100644 index 000000000000..10f2ddb4c1b5 --- /dev/null +++ b/pkg/scheduler/framework/types_test.go @@ -0,0 +1,183 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" +) + +func TestNewClusterInfo(t *testing.T) { + testCases := []struct { + name string + cluster *clusterv1alpha1.Cluster + want *ClusterInfo + }{ + { + name: "Create ClusterInfo with valid cluster", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + want: &ClusterInfo{ + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + }, + }, + { + name: "Create ClusterInfo with nil cluster", + cluster: nil, + want: &ClusterInfo{cluster: nil}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := NewClusterInfo(tc.cluster) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestClusterInfo_Cluster(t *testing.T) { + testCases := []struct { + name string + clusterInfo *ClusterInfo + want *clusterv1alpha1.Cluster + }{ + { + name: "Get cluster from valid ClusterInfo", + clusterInfo: &ClusterInfo{ + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + }, + want: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + }, + { + name: "Get cluster from nil ClusterInfo", + clusterInfo: nil, + want: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := tc.clusterInfo.Cluster() + assert.Equal(t, tc.want, got) + }) + } +} + +func TestFitError_Error(t *testing.T) { + testCases := []struct { + name string + fitError FitError + expectedOutput string + expectedReasons []string + }{ + { + name: "No clusters available", + fitError: FitError{ + NumAllClusters: 0, + Diagnosis: Diagnosis{ClusterToResultMap: ClusterToResultMap{}}, + }, + expectedOutput: "0/0 clusters are available: no cluster exists.", + expectedReasons: []string{}, + }, + { + name: "Multiple reasons for unavailability", + fitError: FitError{ + NumAllClusters: 3, + Diagnosis: Diagnosis{ + ClusterToResultMap: ClusterToResultMap{ + "cluster1": &Result{reasons: []string{"insufficient CPU", "insufficient memory"}}, + "cluster2": &Result{reasons: []string{"insufficient CPU"}}, + "cluster3": &Result{reasons: []string{"taint mismatch"}}, + }, + }, + }, + expectedOutput: "0/3 clusters are available:", + expectedReasons: []string{ + "2 insufficient CPU", + "1 insufficient memory", + "1 taint mismatch", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := tc.fitError.Error() + + // Check if the error message starts with the expected output + assert.True(t, strings.HasPrefix(got, tc.expectedOutput), "Error message should start with expected output") + + if len(tc.expectedReasons) > 0 { + // Check each reason + for _, reason := range tc.expectedReasons { + assert.Contains(t, got, reason, "Error message should contain the reason: %s", reason) + } + + // Check the total number of reasons + gotReasons := strings.Split(strings.TrimPrefix(got, tc.expectedOutput), ",") + assert.Equal(t, len(tc.expectedReasons), len(gotReasons), "Number of reasons should match") + } else { + // If no reasons are expected, the got message should exactly match the expected output + assert.Equal(t, tc.expectedOutput, got, "Error message should exactly match 
expected output when no reasons are provided") + } + }) + } +} + +func TestUnschedulableError_Error(t *testing.T) { + testCases := []struct { + name string + message string + }{ + { + name: "Unschedulable due to insufficient resources", + message: "Insufficient CPU in all clusters", + }, + { + name: "Unschedulable due to taint mismatch", + message: "No cluster matches required tolerations", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + unschedulableErr := UnschedulableError{Message: tc.message} + assert.Equal(t, tc.message, unschedulableErr.Error()) + }) + } +} diff --git a/pkg/scheduler/metrics/metrics_test.go b/pkg/scheduler/metrics/metrics_test.go new file mode 100644 index 000000000000..2872539b046c --- /dev/null +++ b/pkg/scheduler/metrics/metrics_test.go @@ -0,0 +1,102 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "errors" + "testing" + "time" +) + +func TestBindingSchedule(t *testing.T) { + tests := []struct { + name string + scheduleType string + duration float64 + err error + }{ + { + name: "Successful schedule", + scheduleType: "test", + duration: 1.5, + err: nil, + }, + { + name: "Failed schedule", + scheduleType: "test", + duration: 0.5, + err: errors.New("schedule failed"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(_ *testing.T) { + // We can't easily test the metric values directly, so we'll just ensure the function doesn't panic + BindingSchedule(tt.scheduleType, tt.duration, tt.err) + }) + } +} + +func TestScheduleStep(t *testing.T) { + tests := []struct { + name string + action string + duration time.Duration + }{ + { + name: "Filter step", + action: ScheduleStepFilter, + duration: 100 * time.Millisecond, + }, + { + name: "Score step", + action: ScheduleStepScore, + duration: 200 * time.Millisecond, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(_ *testing.T) { + startTime := time.Now().Add(-tt.duration) + // Ensure the function doesn't panic + ScheduleStep(tt.action, startTime) + }) + } +} + +func TestCountSchedulerBindings(t *testing.T) { + tests := []struct { + name string + event string + }{ + { + name: "Binding add event", + event: BindingAdd, + }, + { + name: "Binding update event", + event: BindingUpdate, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(_ *testing.T) { + // Ensure the function doesn't panic + CountSchedulerBindings(tt.event) + }) + } +} diff --git a/pkg/util/lifted/validatingfhpa_test.go b/pkg/util/lifted/validatingfhpa_test.go new file mode 100644 index 000000000000..4233adec0090 --- /dev/null +++ b/pkg/util/lifted/validatingfhpa_test.go @@ -0,0 +1,1320 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lifted + +import ( + "testing" + + "github.com/stretchr/testify/assert" + autoscalingv2 "k8s.io/api/autoscaling/v2" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/ptr" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" +) + +func TestValidateFederatedHPA(t *testing.T) { + tests := []struct { + name string + fhpa *autoscalingv1alpha1.FederatedHPA + wantErr bool + }{ + { + name: "valid FederatedHPA", + fhpa: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-fhpa", + Namespace: "default", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "invalid name", + fhpa: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid/name", + Namespace: "default", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + }, + }, + wantErr: true, + }, + { + name: "invalid spec", + fhpa: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-fhpa", + Namespace: "default", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + MinReplicas: ptr.To[int32](0), + MaxReplicas: 0, + }, + }, + wantErr: true, + }, + { + name: "missing namespace", + fhpa: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-fhpa", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := ValidateFederatedHPA(tt.fhpa) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateFederatedHPASpec(t *testing.T) { + tests := []struct { + name string + spec *autoscalingv1alpha1.FederatedHPASpec + wantErr bool + }{ + { + name: "valid spec", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: 
"cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "invalid minReplicas", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](0), + MaxReplicas: 10, + }, + wantErr: true, + }, + { + name: "maxReplicas less than minReplicas", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](5), + MaxReplicas: 3, + }, + wantErr: true, + }, + { + name: "invalid scaleTargetRef", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + }, + wantErr: true, + }, + { + name: "invalid metrics", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.ResourceMetricSourceType, + // Missing Resource field to trigger a validation error + }, + }, + }, + wantErr: true, + }, + { + name: "invalid behavior", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](-1), // Invalid: negative value + }, + }, + }, + wantErr: true, + }, + { + name: "maxReplicas less than 1", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 0, + }, + wantErr: true, + }, + { + name: "minReplicas equals maxReplicas", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](5), + MaxReplicas: 5, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateFederatedHPASpec(tt.spec, field.NewPath("spec"), 1) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + // Check for specific errors + if tt.name == "invalid minReplicas" { + assert.Contains(t, errors.ToAggregate().Error(), "minReplicas", "Expected error related to minReplicas") + } + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateCrossVersionObjectReference(t *testing.T) { + tests := []struct { + name string + ref autoscalingv2.CrossVersionObjectReference + wantErr bool + }{ + { + name: "valid reference", + ref: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "my-deployment", + }, + wantErr: false, + }, + { + name: "missing kind", + ref: autoscalingv2.CrossVersionObjectReference{ + Name: "my-deployment", + }, + wantErr: true, + }, + { + name: "missing name", + ref: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + 
}, + wantErr: true, + }, + { + name: "invalid kind", + ref: autoscalingv2.CrossVersionObjectReference{ + Kind: "Invalid/Kind", + Name: "my-deployment", + }, + wantErr: true, + }, + { + name: "invalid name", + ref: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "my/deployment", + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := ValidateCrossVersionObjectReference(tt.ref, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateFederatedHPAStatus(t *testing.T) { + tests := []struct { + name string + status *autoscalingv2.HorizontalPodAutoscalerStatus + wantErr bool + }{ + { + name: "valid status", + status: &autoscalingv2.HorizontalPodAutoscalerStatus{ + CurrentReplicas: 3, + DesiredReplicas: 5, + }, + wantErr: false, + }, + { + name: "negative current replicas", + status: &autoscalingv2.HorizontalPodAutoscalerStatus{ + CurrentReplicas: -1, + DesiredReplicas: 5, + }, + wantErr: true, + }, + { + name: "negative desired replicas", + status: &autoscalingv2.HorizontalPodAutoscalerStatus{ + CurrentReplicas: 3, + DesiredReplicas: -1, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateFederatedHPAStatus(tt.status) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateBehavior(t *testing.T) { + tests := []struct { + name string + behavior *autoscalingv2.HorizontalPodAutoscalerBehavior + wantErr bool + }{ + { + name: "valid behavior", + behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](60), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + PeriodSeconds: 15, + }, + }, + }, + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](300), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + PeriodSeconds: 15, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "invalid scale up stabilization window", + behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](-1), + }, + }, + wantErr: true, + }, + { + name: "invalid scale down policy", + behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleDown: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + { + Type: autoscalingv2.PercentScalingPolicy, + Value: -1, + PeriodSeconds: 15, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "nil behavior", + behavior: nil, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateBehavior(tt.behavior, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} +func TestValidateScalingRules(t *testing.T) { + validPolicy := autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + PeriodSeconds: 15, + } + + 
tests := []struct { + name string + rules *autoscalingv2.HPAScalingRules + wantErr bool + }{ + { + name: "nil rules", + rules: nil, + wantErr: false, + }, + { + name: "valid rules with Max select policy", + rules: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](300), + SelectPolicy: ptr.To[autoscalingv2.ScalingPolicySelect](autoscalingv2.MaxChangePolicySelect), + Policies: []autoscalingv2.HPAScalingPolicy{validPolicy}, + }, + wantErr: false, + }, + { + name: "valid rules with Min select policy", + rules: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](300), + SelectPolicy: ptr.To[autoscalingv2.ScalingPolicySelect](autoscalingv2.MinChangePolicySelect), + Policies: []autoscalingv2.HPAScalingPolicy{validPolicy}, + }, + wantErr: false, + }, + { + name: "valid rules with Disabled select policy", + rules: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](300), + SelectPolicy: ptr.To[autoscalingv2.ScalingPolicySelect](autoscalingv2.DisabledPolicySelect), + Policies: []autoscalingv2.HPAScalingPolicy{validPolicy}, + }, + wantErr: false, + }, + { + name: "negative stabilization window", + rules: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](-1), + Policies: []autoscalingv2.HPAScalingPolicy{validPolicy}, + }, + wantErr: true, + }, + { + name: "stabilization window exceeding max", + rules: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](MaxStabilizationWindowSeconds + 1), + Policies: []autoscalingv2.HPAScalingPolicy{validPolicy}, + }, + wantErr: true, + }, + { + name: "invalid select policy", + rules: &autoscalingv2.HPAScalingRules{ + SelectPolicy: ptr.To[autoscalingv2.ScalingPolicySelect]("InvalidPolicy"), + Policies: []autoscalingv2.HPAScalingPolicy{validPolicy}, + }, + wantErr: true, + }, + { + name: "no policies", + rules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{}, + }, + wantErr: true, + }, + { + name: "invalid policy", + rules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + { + Type: "InvalidType", + Value: 0, + PeriodSeconds: 0, + }, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateScalingRules(tt.rules, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateScalingPolicy(t *testing.T) { + tests := []struct { + name string + policy autoscalingv2.HPAScalingPolicy + wantErr bool + }{ + { + name: "valid pods scaling policy", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PodsScalingPolicy, + Value: 1, + PeriodSeconds: 15, + }, + wantErr: false, + }, + { + name: "valid percent scaling policy", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PercentScalingPolicy, + Value: 10, + PeriodSeconds: 15, + }, + wantErr: false, + }, + { + name: "invalid policy type", + policy: autoscalingv2.HPAScalingPolicy{ + Type: "InvalidType", + Value: 1, + PeriodSeconds: 15, + }, + wantErr: true, + }, + { + name: "zero value", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PodsScalingPolicy, + Value: 0, + PeriodSeconds: 15, + }, + wantErr: true, + }, + { + name: "negative value", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PodsScalingPolicy, + Value: -1, + PeriodSeconds: 15, + }, + 
wantErr: true, + }, + { + name: "zero period seconds", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PodsScalingPolicy, + Value: 1, + PeriodSeconds: 0, + }, + wantErr: true, + }, + { + name: "negative period seconds", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PodsScalingPolicy, + Value: 1, + PeriodSeconds: -1, + }, + wantErr: true, + }, + { + name: "period seconds exceeding max", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PodsScalingPolicy, + Value: 1, + PeriodSeconds: MaxPeriodSeconds + 1, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateScalingPolicy(tt.policy, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateMetricSpec(t *testing.T) { + tests := []struct { + name string + spec autoscalingv2.MetricSpec + wantErr bool + }{ + { + name: "valid resource metric", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + }, + wantErr: false, + }, + { + name: "empty metric type", + spec: autoscalingv2.MetricSpec{}, + wantErr: true, + }, + { + name: "invalid metric type", + spec: autoscalingv2.MetricSpec{ + Type: "InvalidType", + }, + wantErr: true, + }, + { + name: "object metric without object", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ObjectMetricSourceType, + }, + wantErr: true, + }, + { + name: "pods metric without pods", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.PodsMetricSourceType, + }, + wantErr: true, + }, + { + name: "resource metric without resource", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + }, + wantErr: true, + }, + { + name: "container resource metric without container resource", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ContainerResourceMetricSourceType, + }, + wantErr: true, + }, + { + name: "multiple metric sources", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + Pods: &autoscalingv2.PodsMetricSource{}, + }, + wantErr: true, + }, { + name: "valid object metric", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ObjectMetricSourceType, + Object: &autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + }, + wantErr: false, + }, + { + name: "valid container resource metric", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ContainerResourceMetricSourceType, + ContainerResource: &autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + }, + wantErr: false, + }, + { + 
name: "multiple metric sources - object and container resource", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ObjectMetricSourceType, + Object: &autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + ContainerResource: &autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + }, + wantErr: true, + }, + { + name: "multiple metric sources - all types", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ObjectMetricSourceType, + Object: &autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + Pods: &autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "packets-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("1k")), + }, + }, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + ContainerResource: &autoscalingv2.ContainerResourceMetricSource{ + Name: "memory", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](60), + }, + }, + }, + wantErr: true, + }, + { + name: "mismatched type and source - object", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.PodsMetricSourceType, + Object: &autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + }, + wantErr: true, + }, + { + name: "mismatched type and source - container resource", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + ContainerResource: &autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateMetricSpec(tt.spec, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateObjectSource(t *testing.T) { + tests := []struct { + name string + src autoscalingv2.ObjectMetricSource + wantErr bool + }{ + { + name: "valid object metric", + src: autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + 
Name: "my-service", + }, + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + wantErr: false, + }, + { + name: "missing described object", + src: autoscalingv2.ObjectMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + wantErr: true, + }, + { + name: "missing metric name", + src: autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + wantErr: true, + }, + { + name: "missing target value and average value", + src: autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateObjectSource(&tt.src, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidatePodsSource(t *testing.T) { + tests := []struct { + name string + src autoscalingv2.PodsMetricSource + wantErr bool + }{ + { + name: "valid pods metric", + src: autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "packets-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("1k")), + }, + }, + wantErr: false, + }, + { + name: "valid pods metric with selector", + src: autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "packets-per-second", + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "web"}, + }, + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("1k")), + }, + }, + wantErr: false, + }, + { + name: "missing metric name", + src: autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{}, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("1k")), + }, + }, + wantErr: true, + }, + { + name: "missing average value", + src: autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "packets-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + }, + }, + wantErr: true, + }, + { + name: "invalid target type", + src: autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "packets-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + Value: ptr.To(resource.MustParse("1k")), + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validatePodsSource(&tt.src, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, 
"Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateContainerResourceSource(t *testing.T) { + tests := []struct { + name string + src autoscalingv2.ContainerResourceMetricSource + wantErr bool + }{ + { + name: "valid container resource metric", + src: autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + wantErr: false, + }, + { + name: "missing resource name", + src: autoscalingv2.ContainerResourceMetricSource{ + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + wantErr: true, + }, + { + name: "missing container name", + src: autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + wantErr: true, + }, + { + name: "both average utilization and average value set", + src: autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + AverageValue: ptr.To(resource.MustParse("100m")), + }, + }, + wantErr: true, + }, + { + name: "neither average utilization nor average value set", + src: autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateContainerResourceSource(&tt.src, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateResourceSource(t *testing.T) { + tests := []struct { + name string + src autoscalingv2.ResourceMetricSource + wantErr bool + }{ + { + name: "valid utilization", + src: autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + wantErr: false, + }, + { + name: "valid average value", + src: autoscalingv2.ResourceMetricSource{ + Name: "memory", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("100Mi")), + }, + }, + wantErr: false, + }, + { + name: "empty resource name", + src: autoscalingv2.ResourceMetricSource{ + Name: "", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + wantErr: true, + }, + { + name: "missing target", + src: autoscalingv2.ResourceMetricSource{ + Name: "cpu", + }, + wantErr: true, + }, + { + name: "both utilization and value set", + src: autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + AverageValue: ptr.To(resource.MustParse("100m")), + }, + }, + wantErr: true, + }, + { + name: "neither utilization nor value set", + src: autoscalingv2.ResourceMetricSource{ + Name: "cpu", + 
Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateResourceSource(&tt.src, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateMetricTarget(t *testing.T) { + tests := []struct { + name string + target autoscalingv2.MetricTarget + wantErr bool + }{ + { + name: "valid utilization target", + target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](80), + }, + wantErr: false, + }, + { + name: "valid value target", + target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + wantErr: false, + }, + { + name: "valid average value target", + target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("50")), + }, + wantErr: false, + }, + { + name: "missing type", + target: autoscalingv2.MetricTarget{ + AverageUtilization: ptr.To[int32](80), + }, + wantErr: true, + }, + { + name: "invalid type", + target: autoscalingv2.MetricTarget{ + Type: "InvalidType", + AverageUtilization: ptr.To[int32](80), + }, + wantErr: true, + }, + { + name: "negative utilization", + target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](-1), + }, + wantErr: true, + }, + { + name: "negative value", + target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("-100")), + }, + wantErr: true, + }, + { + name: "negative average value", + target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("-50")), + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateMetricTarget(tt.target, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateMetricIdentifier(t *testing.T) { + tests := []struct { + name string + id autoscalingv2.MetricIdentifier + wantErr bool + }{ + { + name: "valid identifier", + id: autoscalingv2.MetricIdentifier{ + Name: "my-metric", + }, + wantErr: false, + }, + { + name: "empty name", + id: autoscalingv2.MetricIdentifier{ + Name: "", + }, + wantErr: true, + }, + { + name: "invalid name", + id: autoscalingv2.MetricIdentifier{ + Name: "my/metric", + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateMetricIdentifier(tt.id, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} diff --git a/pkg/util/overridemanager/overridemanager.go b/pkg/util/overridemanager/overridemanager.go index 4663e7e36023..f6c7abbf4c39 100644 --- a/pkg/util/overridemanager/overridemanager.go +++ b/pkg/util/overridemanager/overridemanager.go @@ -19,14 +19,18 @@ package overridemanager import ( "context" "encoding/json" + "fmt" + "reflect" "sort" jsonpatch 
"github.com/evanphx/json-patch/v5" + "github.com/go-openapi/jsonpointer" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" @@ -280,6 +284,52 @@ func applyJSONPatch(obj *unstructured.Unstructured, overrides []overrideOption) return err } +// applyRawJSONPatch applies the override on to the given raw json object. +func applyRawJSONPatch(raw []byte, overrides []overrideOption) ([]byte, error) { + jsonPatchBytes, err := json.Marshal(overrides) + if err != nil { + return nil, err + } + + patch, err := jsonpatch.DecodePatch(jsonPatchBytes) + if err != nil { + return nil, err + } + + return patch.Apply(raw) +} + +func applyRawYAMLPatch(raw []byte, overrides []overrideOption) ([]byte, error) { + rawJSON, err := yaml.YAMLToJSON(raw) + if err != nil { + klog.ErrorS(err, "Failed to convert yaml to json") + return nil, err + } + + jsonPatchBytes, err := json.Marshal(overrides) + if err != nil { + return nil, err + } + + patch, err := jsonpatch.DecodePatch(jsonPatchBytes) + if err != nil { + return nil, err + } + + rawJSON, err = patch.Apply(rawJSON) + if err != nil { + return nil, err + } + + rawYAML, err := yaml.JSONToYAML(rawJSON) + if err != nil { + klog.Errorf("Failed to convert json to yaml, error: %v", err) + return nil, err + } + + return rawYAML, nil +} + // applyPolicyOverriders applies OverridePolicy/ClusterOverridePolicy overriders to target object func applyPolicyOverriders(rawObj *unstructured.Unstructured, overriders policyv1alpha1.Overriders) error { err := applyImageOverriders(rawObj, overriders.ImageOverrider) @@ -300,6 +350,9 @@ func applyPolicyOverriders(rawObj *unstructured.Unstructured, overriders policyv if err := applyAnnotationsOverriders(rawObj, overriders.AnnotationsOverrider); err != nil { return err } + if err := applyFieldOverriders(rawObj, overriders.FieldOverrider); err != nil { + return err + } return applyJSONPatch(rawObj, parseJSONPatchesByPlaintext(overriders.Plaintext)) } @@ -352,6 +405,50 @@ func applyArgsOverriders(rawObj *unstructured.Unstructured, argsOverriders []pol return nil } +func applyFieldOverriders(rawObj *unstructured.Unstructured, FieldOverriders []policyv1alpha1.FieldOverrider) error { + if len(FieldOverriders) == 0 { + return nil + } + for index := range FieldOverriders { + pointer, err := jsonpointer.New(FieldOverriders[index].FieldPath) + if err != nil { + klog.Errorf("Build jsonpointer with overrider's path err: %v", err) + return err + } + res, kind, err := pointer.Get(rawObj.Object) + if err != nil { + klog.Errorf("Get value by overrider's path err: %v", err) + return err + } + if kind != reflect.String { + errMsg := fmt.Sprintf("Get object's value by overrider's path(%s) is not string", FieldOverriders[index].FieldPath) + klog.Errorf(errMsg) + return fmt.Errorf(errMsg) + } + dataBytes := []byte(res.(string)) + klog.V(4).Infof("Parsed JSON patches by FieldOverriders[%d](%+v)", index, FieldOverriders[index]) + var appliedRawData []byte + if len(FieldOverriders[index].YAML) > 0 { + appliedRawData, err = applyRawYAMLPatch(dataBytes, parseYAMLPatchesByField(FieldOverriders[index].YAML)) + if err != nil { + klog.Errorf("Error applying raw JSON patch: %v", err) + return err + } + } else if len(FieldOverriders[index].JSON) > 0 { + appliedRawData, err = 
applyRawJSONPatch(dataBytes, parseJSONPatchesByField(FieldOverriders[index].JSON)) + if err != nil { + klog.Errorf("Error applying raw YAML patch: %v", err) + return err + } + } + _, err = pointer.Set(rawObj.Object, string(appliedRawData)) + if err != nil { + return err + } + } + return nil +} + func parseJSONPatchesByPlaintext(overriders []policyv1alpha1.PlaintextOverrider) []overrideOption { patches := make([]overrideOption, 0, len(overriders)) for i := range overriders { @@ -363,3 +460,27 @@ func parseJSONPatchesByPlaintext(overriders []policyv1alpha1.PlaintextOverrider) } return patches } + +func parseYAMLPatchesByField(overriders []policyv1alpha1.YAMLPatchOperation) []overrideOption { + patches := make([]overrideOption, 0, len(overriders)) + for i := range overriders { + patches = append(patches, overrideOption{ + Op: string(overriders[i].Operator), + Path: overriders[i].SubPath, + Value: overriders[i].Value, + }) + } + return patches +} + +func parseJSONPatchesByField(overriders []policyv1alpha1.JSONPatchOperation) []overrideOption { + patches := make([]overrideOption, 0, len(overriders)) + for i := range overriders { + patches = append(patches, overrideOption{ + Op: string(overriders[i].Operator), + Path: overriders[i].SubPath, + Value: overriders[i].Value, + }) + } + return patches +} diff --git a/pkg/util/overridemanager/overridemanager_test.go b/pkg/util/overridemanager/overridemanager_test.go index e703d5c6adc1..ae66e6cee586 100644 --- a/pkg/util/overridemanager/overridemanager_test.go +++ b/pkg/util/overridemanager/overridemanager_test.go @@ -34,7 +34,7 @@ import ( "github.com/karmada-io/karmada/test/helper" ) -func Test_overrideManagerImpl_ApplyOverridePolicies(t *testing.T) { +func Test_overrideManagerImpl_ApplyLabelAnnotationOverriderPolicies(t *testing.T) { deployment := helper.NewDeployment(metav1.NamespaceDefault, "test1") deployment.Labels = map[string]string{ "testLabel": "testLabel", @@ -442,3 +442,242 @@ func TestGetMatchingOverridePolicies(t *testing.T) { }) } } + +func Test_overrideManagerImpl_ApplyFieldOverriderPolicies_YAML(t *testing.T) { + configmap := helper.NewConfigMap(metav1.NamespaceDefault, "test1", map[string]string{ + "test.yaml": ` +key: + key1: value +`, + }) + configmapObj, _ := utilhelper.ToUnstructured(configmap) + + type fields struct { + Client client.Client + EventRecorder record.EventRecorder + } + type args struct { + rawObj *unstructured.Unstructured + clusterName string + } + tests := []struct { + name string + fields fields + args args + wantCOP *AppliedOverrides + wantOP *AppliedOverrides + wantErr bool + }{ + { + name: "test yaml overridePolicies", + fields: fields{ + Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(helper.NewCluster("test1"), + &policyv1alpha1.OverridePolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "test1", Namespace: metav1.NamespaceDefault}, + Spec: policyv1alpha1.OverrideSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "v1", + Kind: "ConfigMap", + Namespace: "default", + Name: "test1", + }, + }, + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + TargetCluster: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"test1"}}, + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/test.yaml", + YAML: []policyv1alpha1.YAMLPatchOperation{ + { + SubPath: "/key/key1", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`"updated_value"`)}, + }, + }, + }, + }, + }, + 
}, + }, + }, + }, + ).Build(), + EventRecorder: &record.FakeRecorder{}, + }, + args: args{ + rawObj: configmapObj, + clusterName: "test1", + }, + wantCOP: nil, + wantOP: &AppliedOverrides{ + AppliedItems: []OverridePolicyShadow{ + { + PolicyName: "test1", + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/test.yaml", + YAML: []policyv1alpha1.YAMLPatchOperation{ + { + SubPath: "/key/key1", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`"updated_value"`)}, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := &overrideManagerImpl{ + Client: tt.fields.Client, + EventRecorder: tt.fields.EventRecorder, + } + gotCOP, gotOP, err := o.ApplyOverridePolicies(tt.args.rawObj, tt.args.clusterName) + if (err != nil) != tt.wantErr { + t.Errorf("ApplyOverridePolicies() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotCOP, tt.wantCOP) { + t.Errorf("ApplyOverridePolicies() gotCOP = %v, wantCOP %v", gotCOP, tt.wantCOP) + } + if !reflect.DeepEqual(gotOP, tt.wantOP) { + t.Errorf("ApplyOverridePolicies() gotOP = %v, wantOP %v", gotOP, tt.wantOP) + } + wantData := map[string]interface{}{ + "test.yaml": `key: + key1: updated_value +`, + } + if !reflect.DeepEqual(tt.args.rawObj.Object["data"], wantData) { + t.Errorf("ApplyOverridePolicies() gotData = %v, wantData %v", tt.args.rawObj.Object["data"], wantData) + } + }) + } +} + +func Test_overrideManagerImpl_ApplyJSONOverridePolicies_JSON(t *testing.T) { + configmap := helper.NewConfigMap(metav1.NamespaceDefault, "test1", map[string]string{ + "test.json": `{"key":{"key1":"value"}}`, + }) + configmapObj, _ := utilhelper.ToUnstructured(configmap) + + type fields struct { + Client client.Client + EventRecorder record.EventRecorder + } + type args struct { + rawObj *unstructured.Unstructured + clusterName string + } + tests := []struct { + name string + fields fields + args args + wantCOP *AppliedOverrides + wantOP *AppliedOverrides + wantErr bool + }{ + { + name: "test yaml overridePolicies", + fields: fields{ + Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(helper.NewCluster("test1"), + &policyv1alpha1.OverridePolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "test1", Namespace: metav1.NamespaceDefault}, + Spec: policyv1alpha1.OverrideSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "v1", + Kind: "ConfigMap", + Namespace: "default", + Name: "test1", + }, + }, + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + TargetCluster: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"test1"}}, + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/test.json", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/key/key1", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`"updated_value"`)}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ).Build(), + EventRecorder: &record.FakeRecorder{}, + }, + args: args{ + rawObj: configmapObj, + clusterName: "test1", + }, + wantCOP: nil, + wantOP: &AppliedOverrides{ + AppliedItems: []OverridePolicyShadow{ + { + PolicyName: "test1", + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/test.json", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/key/key1", + Operator: 
policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`"updated_value"`)}, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := &overrideManagerImpl{ + Client: tt.fields.Client, + EventRecorder: tt.fields.EventRecorder, + } + gotCOP, gotOP, err := o.ApplyOverridePolicies(tt.args.rawObj, tt.args.clusterName) + if (err != nil) != tt.wantErr { + t.Errorf("ApplyOverridePolicies() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotCOP, tt.wantCOP) { + t.Errorf("ApplyOverridePolicies() gotCOP = %v, wantCOP %v", gotCOP, tt.wantCOP) + } + if !reflect.DeepEqual(gotOP, tt.wantOP) { + t.Errorf("ApplyOverridePolicies() gotOP = %v, wantOP %v", gotOP, tt.wantOP) + } + wantData := map[string]interface{}{ + "test.json": `{"key":{"key1":"updated_value"}}`, + } + if !reflect.DeepEqual(tt.args.rawObj.Object["data"], wantData) { + t.Errorf("ApplyOverridePolicies() gotData = %v, wantData %v", tt.args.rawObj.Object["data"], wantData) + } + }) + } +} diff --git a/pkg/util/validation/validation.go b/pkg/util/validation/validation.go index 3c3cfc8ae577..0fdb9861d6a8 100644 --- a/pkg/util/validation/validation.go +++ b/pkg/util/validation/validation.go @@ -18,9 +18,10 @@ package validation import ( "fmt" - "strings" + "github.com/go-openapi/jsonpointer" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apivalidation "k8s.io/apimachinery/pkg/api/validation" metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/util/validation" @@ -304,13 +305,70 @@ func ValidateOverrideRules(overrideRules []policyv1alpha1.RuleWithCluster, fldPa // validates predicate path. for imageIndex, image := range rule.Overriders.ImageOverrider { imagePath := rulePath.Child("overriders").Child("imageOverrider").Index(imageIndex) - if image.Predicate != nil && !strings.HasPrefix(image.Predicate.Path, "/") { - allErrs = append(allErrs, field.Invalid(imagePath.Child("predicate").Child("path"), image.Predicate.Path, "path should be start with / character")) + if image.Predicate != nil { + if _, err := jsonpointer.New(image.Predicate.Path); err != nil { + allErrs = append(allErrs, field.Invalid(imagePath.Child("predicate").Child("path"), image.Predicate.Path, err.Error())) + } } } + for fieldIndex, fieldOverrider := range rule.Overriders.FieldOverrider { + fieldPath := rulePath.Child("overriders").Child("fieldOverrider").Index(fieldIndex) + // validates that either YAML or JSON is selected for each field overrider. + if len(fieldOverrider.YAML) > 0 && len(fieldOverrider.JSON) > 0 { + allErrs = append(allErrs, field.Invalid(fieldPath, fieldOverrider, "FieldOverrider has both YAML and JSON set. Only one is allowed")) + } + // validates the field path. + if _, err := jsonpointer.New(fieldOverrider.FieldPath); err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("fieldPath"), fieldOverrider.FieldPath, err.Error())) + } + // validates the JSON patch operations sub path. + allErrs = append(allErrs, validateJSONPatchSubPaths(fieldOverrider.JSON, fieldPath.Child("json"))...) + // validates the YAML patch operations sub path. + allErrs = append(allErrs, validateYAMLPatchSubPaths(fieldOverrider.YAML, fieldPath.Child("yaml"))...) + } + // validates the targetCluster. allErrs = append(allErrs, ValidateClusterAffinity(rule.TargetCluster, rulePath.Child("targetCluster"))...) 
} return allErrs } + +func validateJSONPatchSubPaths(patches []policyv1alpha1.JSONPatchOperation, fieldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for index, patch := range patches { + patchPath := fieldPath.Index(index) + if _, err := jsonpointer.New(patch.SubPath); err != nil { + allErrs = append(allErrs, field.Invalid(patchPath.Child("subPath"), patch.SubPath, err.Error())) + } + allErrs = append(allErrs, validateOverrideOperator(patch.Operator, patch.Value, patchPath.Child("value"))...) + } + return allErrs +} + +func validateYAMLPatchSubPaths(patches []policyv1alpha1.YAMLPatchOperation, fieldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for index, patch := range patches { + patchPath := fieldPath.Index(index) + if _, err := jsonpointer.New(patch.SubPath); err != nil { + allErrs = append(allErrs, field.Invalid(patchPath.Child("subPath"), patch.SubPath, err.Error())) + } + allErrs = append(allErrs, validateOverrideOperator(patch.Operator, patch.Value, patchPath.Child("value"))...) + } + return allErrs +} + +func validateOverrideOperator(operator policyv1alpha1.OverriderOperator, value apiextensionsv1.JSON, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + switch operator { + case policyv1alpha1.OverriderOpAdd, policyv1alpha1.OverriderOpReplace: + if value.Size() == 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value, "value is required for add or replace operation")) + } + case policyv1alpha1.OverriderOpRemove: + if value.Size() != 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value, "value is not allowed for remove operation")) + } + } + return allErrs +} diff --git a/pkg/webhook/federatedhpa/mutating_test.go b/pkg/webhook/federatedhpa/mutating_test.go new file mode 100644 index 000000000000..b43d5a0487a6 --- /dev/null +++ b/pkg/webhook/federatedhpa/mutating_test.go @@ -0,0 +1,214 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federatedhpa + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "reflect" + "testing" + + admissionv1 "k8s.io/api/admission/v1" + autoscalingv2 "k8s.io/api/autoscaling/v2" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" + "github.com/karmada-io/karmada/pkg/util/lifted" +) + +type fakeMutationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeMutationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. 
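+// When a fixture object is configured it is copied into the target via reflection; a configured error is returned unchanged.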
+func (f *fakeMutationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestMutatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want admission.Response + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := MutatingAdmission{ + Decoder: tt.decoder, + } + got := m.Handle(context.Background(), tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Handle() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMutatingAdmission_Handle_FullCoverage(t *testing.T) { + // Define the federatedhpa object name and namespace to be used in the test. + name := "test-app" + namespace := "test-namespace" + + // Mock an admission request request. + req := admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + } + + // Create the initial federatedhpa with default values for testing. + federatedHPAObj := &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: name, + }, + MinReplicas: nil, + MaxReplicas: 10, + Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{}, + Metrics: []autoscalingv2.MetricSpec{}, + }, + } + + // Define the expected federatedhpa object after mutations. + wantFederatedHPAObj := &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: name, + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](0), + SelectPolicy: ptr.To[autoscalingv2.ScalingPolicySelect](autoscalingv2.MaxChangePolicySelect), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + Type: autoscalingv2.PodsScalingPolicy, + Value: 4, + PeriodSeconds: 15, + }, + { + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + PeriodSeconds: 15, + }, + }, + }, + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: nil, + SelectPolicy: ptr.To[autoscalingv2.ScalingPolicySelect](autoscalingv2.MaxChangePolicySelect), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + PeriodSeconds: 15, + }, + }, + }, + }, + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: corev1.ResourceCPU, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](lifted.DefaultCPUUtilization), + }, + }, + }, + }, + }, + } + + // Mock decoder that decodes the request into the federatedhpa object. 
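+	// The fake decoder hands the handler the un-defaulted object, while req.Object.Raw below carries the expected defaulted form, so a correct handler produces no patches.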
+ decoder := &fakeMutationDecoder{ + obj: federatedHPAObj, + } + + // Marshal the expected federatedhpa object to simulate the final mutated object. + wantBytes, err := json.Marshal(wantFederatedHPAObj) + if err != nil { + t.Fatalf("Failed to marshal expected federatedHPA object: %v", err) + } + req.Object.Raw = wantBytes + + // Instantiate the mutating handler. + mutatingHandler := MutatingAdmission{ + Decoder: decoder, + } + + // Call the Handle function. + got := mutatingHandler.Handle(context.Background(), req) + + // Check if there are any patches applied. There should be no patches if the federatedhpa object is handled correctly. + if len(got.Patches) > 0 { + t.Errorf("Handle() returned patches, but no patches were expected. Got patches: %v", got.Patches) + } + + // Check if the admission request was allowed. + if !got.Allowed { + t.Errorf("Handle() got.Allowed = false, want true") + } +} diff --git a/pkg/webhook/federatedhpa/validating_test.go b/pkg/webhook/federatedhpa/validating_test.go new file mode 100644 index 000000000000..155a03b0c277 --- /dev/null +++ b/pkg/webhook/federatedhpa/validating_test.go @@ -0,0 +1,276 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federatedhpa + +import ( + "context" + "errors" + "net/http" + "reflect" + "strings" + "testing" + + autoscalingv2 "k8s.io/api/autoscaling/v2" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" +) + +// ResponseType represents the type of admission response. +type ResponseType string + +const ( + Denied ResponseType = "Denied" + Allowed ResponseType = "Allowed" + Errored ResponseType = "Errored" +) + +// TestResponse is used to define expected response in a test case. +type TestResponse struct { + Type ResponseType + Message string +} + +type fakeValidationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeValidationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. 
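+// Like Decode, it returns the configured error when set, otherwise copies the fixture object into the target.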
+func (f *fakeValidationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestValidatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want TestResponse + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: TestResponse{ + Type: Errored, + Message: "decode error", + }, + }, + { + name: "Handle_ValidationFederatedHPASpecFails_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-app", + }, + MinReplicas: ptr.To[int32](0), + MaxReplicas: 10, + Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](0), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + PeriodSeconds: 15, + Type: autoscalingv2.PodsScalingPolicy, + Value: 4, + }, + { + PeriodSeconds: 15, + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + }, + }, + }, + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](300), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + PeriodSeconds: 15, + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + }, + }, + }, + }, + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.PodsMetricSourceType, + Pods: &autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "http_requests", + }, + Target: autoscalingv2.MetricTarget{ + AverageValue: resource.NewMilliQuantity(700, resource.DecimalSI), + Type: autoscalingv2.ValueMetricType, + }, + }, + }, + }, + }, + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + CurrentReplicas: 2, + DesiredReplicas: 2, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.minReplicas: Invalid value: 0", + }, + }, + { + name: "Handle_ValidationSucceeds_AllowsAdmission", + decoder: &fakeValidationDecoder{ + obj: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-app", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](0), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + PeriodSeconds: 15, + Type: autoscalingv2.PodsScalingPolicy, + Value: 4, + }, + { + PeriodSeconds: 15, + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + }, + }, + }, + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](300), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + PeriodSeconds: 15, + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + }, + }, + }, + }, + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.PodsMetricSourceType, + Pods: &autoscalingv2.PodsMetricSource{ + Metric: 
autoscalingv2.MetricIdentifier{ + Name: "http_requests", + }, + Target: autoscalingv2.MetricTarget{ + AverageValue: resource.NewMilliQuantity(700, resource.DecimalSI), + Type: autoscalingv2.ValueMetricType, + }, + }, + }, + }, + }, + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + CurrentReplicas: 2, + DesiredReplicas: 2, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Allowed, + Message: "", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &ValidatingAdmission{ + Decoder: tt.decoder, + } + got := v.Handle(context.Background(), tt.req) + + // Extract type and message from the actual response. + gotType := extractResponseType(got) + gotMessage := extractErrorMessage(got) + + if gotType != tt.want.Type || !strings.Contains(gotMessage, tt.want.Message) { + t.Errorf("Handle() = {Type: %v, Message: %v}, want {Type: %v, Message: %v}", gotType, gotMessage, tt.want.Type, tt.want.Message) + } + }) + } +} + +// extractResponseType extracts the type of admission response. +func extractResponseType(resp admission.Response) ResponseType { + if resp.Allowed { + return Allowed + } + if resp.Result != nil { + if resp.Result.Code == http.StatusBadRequest { + return Errored + } + } + return Denied +} + +// extractErrorMessage extracts the error message from a Denied/Errored response. +func extractErrorMessage(resp admission.Response) string { + if !resp.Allowed && resp.Result != nil { + return resp.Result.Message + } + return "" +} diff --git a/pkg/webhook/federatedresourcequota/validating_test.go b/pkg/webhook/federatedresourcequota/validating_test.go index 7773f31c1113..a1360b6ea367 100644 --- a/pkg/webhook/federatedresourcequota/validating_test.go +++ b/pkg/webhook/federatedresourcequota/validating_test.go @@ -78,9 +78,11 @@ func (f *fakeValidationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Ob return nil } -// sortAndJoinMessages sorts and joins error message parts to ensure consistent ordering. +// normalizedMessages trim the square brackets, and sorted error message parts to ensure consistent ordering. // This prevents test flakiness caused by varying error message order. -func sortAndJoinMessages(message string) string { +func normalizedMessages(message string) string { + message = strings.TrimLeft(message, "[") + message = strings.TrimRight(message, "]") parts := strings.Split(message, ", ") sort.Strings(parts) return strings.Join(parts, ", ") @@ -421,8 +423,8 @@ func TestValidatingAdmission_Handle(t *testing.T) { Decoder: tt.decoder, } got := v.Handle(context.Background(), tt.req) - got.Result.Message = sortAndJoinMessages(got.Result.Message) - tt.want.Message = sortAndJoinMessages(tt.want.Message) + got.Result.Message = normalizedMessages(got.Result.Message) + tt.want.Message = normalizedMessages(tt.want.Message) // Extract type and message from the actual response. gotType := extractResponseType(got) diff --git a/pkg/webhook/interpreter/decode.go b/pkg/webhook/interpreter/decode.go index f10c5a416489..ea4bce532967 100644 --- a/pkg/webhook/interpreter/decode.go +++ b/pkg/webhook/interpreter/decode.go @@ -43,7 +43,7 @@ func NewDecoder(scheme *runtime.Scheme) *Decoder { // It errors out if req.Object.Raw is empty i.e. containing 0 raw bytes. 
func (d *Decoder) Decode(req Request, into runtime.Object) error { if len(req.Object.Raw) == 0 { - return fmt.Errorf("there is no context to decode") + return fmt.Errorf("there is no content to decode") } return d.DecodeRaw(req.Object, into) } diff --git a/pkg/webhook/interpreter/decode_test.go b/pkg/webhook/interpreter/decode_test.go new file mode 100644 index 000000000000..9b3580389056 --- /dev/null +++ b/pkg/webhook/interpreter/decode_test.go @@ -0,0 +1,277 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package interpreter + +import ( + "fmt" + "strings" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + + configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" +) + +type Interface interface { + GetAPIVersion() string + GetKind() string + GetName() string +} + +// MyTestPod represents a simplified version of a Kubernetes Pod for testing purposes. +// It includes basic fields such as API version, kind, and metadata. +type MyTestPod struct { + APIVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Metadata struct { + Name string `json:"name"` + } `json:"metadata"` +} + +// DeepCopyObject creates a deep copy of the MyTestPod instance. +// This method is part of the runtime.Object interface and ensures that modifications +// to the copy do not affect the original object. +func (p *MyTestPod) DeepCopyObject() runtime.Object { + return &MyTestPod{ + APIVersion: p.APIVersion, + Kind: p.Kind, + Metadata: p.Metadata, + } +} + +// GetObjectKind returns the schema.ObjectKind for the MyTestPod instance. +// This method is part of the runtime.Object interface and provides the API version +// and kind of the object, which is used for object identification in Kubernetes. +func (p *MyTestPod) GetObjectKind() schema.ObjectKind { + return &metav1.TypeMeta{ + APIVersion: p.APIVersion, + Kind: p.Kind, + } +} + +// GetAPIVersion returns the API version of the MyTestPod. +func (p *MyTestPod) GetAPIVersion() string { + return p.APIVersion +} + +// GetKind returns the kind of the MyTestPod. +func (p *MyTestPod) GetKind() string { + return p.Kind +} + +// GetName returns the name of the MyTestPod. 
+func (p *MyTestPod) GetName() string { + return p.Metadata.Name +} + +func TestNewDecoder(t *testing.T) { + tests := []struct { + name string + }{ + { + name: "NewDecoder_ValidDecoder_DecoderIsValid", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := runtime.NewScheme() + decoder := NewDecoder(scheme) + if decoder == nil { + t.Errorf("expected decoder to not be nil") + } + }) + } +} + +func TestDecodeRaw(t *testing.T) { + tests := []struct { + name string + apiVersion string + kind string + objName string + rawObj *runtime.RawExtension + into Interface + prep func(re *runtime.RawExtension, apiVersion, kind, name string) error + verify func(into Interface, apiVersion, kind, name string) error + wantErr bool + errMsg string + }{ + { + name: "DecodeRaw_ValidRaw_DecodeRawIsSuccessful", + objName: "test-pod", + kind: "Pod", + apiVersion: "v1", + rawObj: &runtime.RawExtension{ + Raw: []byte{}, + }, + into: &unstructured.Unstructured{}, + prep: func(re *runtime.RawExtension, apiVersion, kind, name string) error { + re.Raw = []byte(fmt.Sprintf(`{"apiVersion": "%s", "kind": "%s", "metadata": {"name": "%s"}}`, apiVersion, kind, name)) + return nil + }, + verify: verifyRuntimeObject, + wantErr: false, + }, + { + name: "DecodeRaw_IntoNonUnstructuredType_RawDecoded", + objName: "test-pod", + kind: "Pod", + apiVersion: "v1", + rawObj: &runtime.RawExtension{ + Raw: []byte{}, + }, + into: &MyTestPod{}, + prep: func(re *runtime.RawExtension, apiVersion, kind, name string) error { + re.Raw = []byte(fmt.Sprintf(`{"apiVersion": "%s", "kind": "%s", "metadata": {"name": "%s"}}`, apiVersion, kind, name)) + return nil + }, + verify: verifyRuntimeObject, + wantErr: false, + }, + { + name: "DecodeRaw_EmptyRaw_NoContentToDecode", + rawObj: &runtime.RawExtension{ + Raw: []byte{}, + }, + into: &unstructured.Unstructured{}, + prep: func(*runtime.RawExtension, string, string, string) error { return nil }, + verify: func(Interface, string, string, string) error { return nil }, + wantErr: true, + errMsg: "there is no content to decode", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.rawObj, test.apiVersion, test.kind, test.objName); err != nil { + t.Errorf("failed to prep the runtime raw extension object: %v", err) + } + scheme := runtime.NewScheme() + decoder := NewDecoder(scheme) + intoObj, ok := test.into.(runtime.Object) + if !ok { + t.Errorf("failed to type assert into object into runtime rawextension") + } + err := decoder.DecodeRaw(*test.rawObj, intoObj) + if err != nil && !test.wantErr { + t.Errorf("unexpected error while decoding the raw: %v", err) + } + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + if err := test.verify(test.into, test.apiVersion, test.kind, test.objName); err != nil { + t.Errorf("failed to verify decoding the raw: %v", err) + } + }) + } +} + +func TestDecode(t *testing.T) { + tests := []struct { + name string + apiVersion string + kind string + objName string + req *Request + into Interface + prep func(re *Request, apiVersion, kind, name string) error + verify func(into Interface, apiVersion, kind, name string) error + wantErr bool + errMsg string + }{ + { + name: "Decode_ValidRequest_DecodeRequestIsSuccessful", + objName: "test-pod", + kind: "Pod", + apiVersion: "v1", + req: &Request{ + 
ResourceInterpreterRequest: configv1alpha1.ResourceInterpreterRequest{ + Object: runtime.RawExtension{}, + }, + }, + into: &unstructured.Unstructured{}, + prep: func(re *Request, apiVersion, kind, name string) error { + re.ResourceInterpreterRequest.Object.Raw = []byte(fmt.Sprintf(`{"apiVersion": "%s", "kind": "%s", "metadata": {"name": "%s"}}`, apiVersion, kind, name)) + return nil + }, + verify: verifyRuntimeObject, + wantErr: false, + }, + { + name: "Decode_EmptyRaw_NoContentToDecode", + req: &Request{ + ResourceInterpreterRequest: configv1alpha1.ResourceInterpreterRequest{ + Object: runtime.RawExtension{}, + }, + }, + into: &unstructured.Unstructured{}, + prep: func(*Request, string, string, string) error { return nil }, + verify: func(Interface, string, string, string) error { return nil }, + wantErr: true, + errMsg: "there is no content to decode", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.req, test.apiVersion, test.kind, test.objName); err != nil { + t.Errorf("failed to prep the runtime raw extension object: %v", err) + } + scheme := runtime.NewScheme() + decoder := NewDecoder(scheme) + if decoder == nil { + t.Errorf("expected decoder to not be nil") + } + intoObj, ok := test.into.(runtime.Object) + if !ok { + t.Errorf("failed to type assert into object into runtime rawextension") + } + err := decoder.Decode(*test.req, intoObj) + if err != nil && !test.wantErr { + t.Errorf("unexpected error while decoding the raw: %v", err) + } + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + if err := test.verify(test.into, test.apiVersion, test.kind, test.objName); err != nil { + t.Errorf("failed to verify decoding the raw: %v", err) + } + }) + } +} + +// verifyRuntimeObject checks if the runtime object (`into`) matches the given +// `apiVersion`, `kind`, and `name`. It returns an error if any field doesn't match. +func verifyRuntimeObject(into Interface, apiVersion, kind, name string) error { + if got := into.GetAPIVersion(); got != apiVersion { + return fmt.Errorf("expected API version '%s', got '%s'", apiVersion, got) + } + if got := into.GetKind(); got != kind { + return fmt.Errorf("expected kind '%s', got '%s'", kind, got) + } + if got := into.GetName(); got != name { + return fmt.Errorf("expected name '%s', got '%s'", name, got) + } + return nil +} diff --git a/pkg/webhook/interpreter/http_test.go b/pkg/webhook/interpreter/http_test.go new file mode 100644 index 000000000000..04443821b0da --- /dev/null +++ b/pkg/webhook/interpreter/http_test.go @@ -0,0 +1,346 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package interpreter + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "k8s.io/apimachinery/pkg/util/json" + + configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" +) + +// HTTPMockHandler implements the Handler and DecoderInjector interfaces for testing. +type HTTPMockHandler struct { + response Response + decoder *Decoder +} + +// Handle implements the Handler interface for HTTPMockHandler. +func (m *HTTPMockHandler) Handle(_ context.Context, _ Request) Response { + return m.response +} + +// InjectDecoder implements the DecoderInjector interface by setting the decoder. +func (m *HTTPMockHandler) InjectDecoder(decoder *Decoder) { + m.decoder = decoder +} + +// mockBody simulates an error when reading the request body. +type mockBody struct{} + +func (m *mockBody) Read(_ []byte) (n int, err error) { + return 0, errors.New("mock read error") +} + +func (m *mockBody) Close() error { + return nil +} + +// limitedBadResponseWriter is a custom io.Writer implementation that simulates +// write errors for a specified number of attempts. After a certain number of failures, +// it allows the write operation to succeed. +type limitedBadResponseWriter struct { + failCount int + maxFailures int +} + +// Write simulates writing data to the writer. It forces an error response for +// a limited number of attempts, specified by maxFailures. Once failCount reaches +// maxFailures, it allows the write to succeed. +func (b *limitedBadResponseWriter) Write(p []byte) (n int, err error) { + if b.failCount < b.maxFailures { + b.failCount++ + return 0, errors.New("forced write error") + } + // After reaching maxFailures, allow the write to succeed to stop the infinite loop. 
+ return len(p), nil +} + +func TestServeHTTP(t *testing.T) { + tests := []struct { + name string + req *http.Request + mockHandler *HTTPMockHandler + contentType string + res configv1alpha1.ResourceInterpreterContext + prep func(*http.Request, string) error + want *configv1alpha1.ResourceInterpreterResponse + }{ + { + name: "ServeHTTP_EmptyBody_RequestFailed", + req: httptest.NewRequest(http.MethodPost, "/", nil), + mockHandler: &HTTPMockHandler{}, + contentType: "application/json", + prep: func(req *http.Request, contentType string) error { + req.Header.Set("Content-Type", contentType) + req.Body = nil + return nil + }, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "", + Successful: false, + Status: &configv1alpha1.RequestStatus{ + Message: "request body is empty", + Code: http.StatusBadRequest, + }, + }, + }, + { + name: "ServeHTTP_InvalidContentType_ContentTypeIsInvalid", + req: httptest.NewRequest(http.MethodPost, "/", bytes.NewBuffer([]byte(`{}`))), + mockHandler: &HTTPMockHandler{}, + contentType: "text/plain", + prep: func(req *http.Request, contentType string) error { + req.Header.Set("Content-Type", contentType) + return nil + }, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "", + Successful: false, + Status: &configv1alpha1.RequestStatus{ + Message: "contentType=text/plain, expected application/json", + Code: http.StatusBadRequest, + }, + }, + }, + { + name: "ServeHTTP_InvalidBodyJSON_JSONBodyIsInvalid", + req: httptest.NewRequest(http.MethodPost, "/", bytes.NewBuffer([]byte(`invalid-json`))), + mockHandler: &HTTPMockHandler{}, + contentType: "application/json", + prep: func(req *http.Request, contentType string) error { + req.Header.Set("Content-Type", contentType) + return nil + }, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "", + Successful: false, + Status: &configv1alpha1.RequestStatus{ + Message: "json parse error", + Code: http.StatusBadRequest, + }, + }, + }, + { + name: "ServeHTTP_ReadBodyError_FailedToReadBody", + req: httptest.NewRequest(http.MethodPost, "/", &mockBody{}), + mockHandler: &HTTPMockHandler{}, + contentType: "application/json", + prep: func(req *http.Request, contentType string) error { + req.Header.Set("Content-Type", contentType) + return nil + }, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "", + Successful: false, + Status: &configv1alpha1.RequestStatus{ + Message: "mock read error", + Code: http.StatusBadRequest, + }, + }, + }, + { + name: "ServeHTTP_ValidRequest_RequestIsValid", + req: httptest.NewRequest(http.MethodPost, "/", bytes.NewBuffer([]byte(`{}`))), + mockHandler: &HTTPMockHandler{ + response: Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + Successful: true, + Status: &configv1alpha1.RequestStatus{Code: http.StatusOK}, + }, + }, + }, + contentType: "application/json", + prep: func(req *http.Request, contentType string) error { + req.Header.Set("Content-Type", contentType) + requestBody := configv1alpha1.ResourceInterpreterContext{ + Request: &configv1alpha1.ResourceInterpreterRequest{ + UID: "test-uid", + }, + } + body, err := json.Marshal(requestBody) + if err != nil { + return fmt.Errorf("failed to marshal request body: %v", err) + } + req.Body = io.NopCloser(bytes.NewBuffer(body)) + return nil + }, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "test-uid", + Successful: true, + Status: &configv1alpha1.RequestStatus{ + Message: "", + Code: http.StatusOK, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t 
*testing.T) { + recorder := httptest.NewRecorder() + if err := test.prep(test.req, test.contentType); err != nil { + t.Errorf("failed to prep serving http: %v", err) + } + webhook := NewWebhook(test.mockHandler, &Decoder{}) + webhook.ServeHTTP(recorder, test.req) + if err := verifyResourceInterpreterResponse(recorder.Body.Bytes(), test.want); err != nil { + t.Errorf("failed to verify resource interpreter response: %v", err) + } + }) + } +} + +func TestWriteResponse(t *testing.T) { + tests := []struct { + name string + res Response + rec *httptest.ResponseRecorder + mockHandler *HTTPMockHandler + decoder *Decoder + verify func([]byte, *configv1alpha1.ResourceInterpreterResponse) error + want *configv1alpha1.ResourceInterpreterResponse + }{ + { + name: "WriteResponse_ValidValues_IsSucceeded", + res: Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + UID: "test-uid", + Successful: true, + Status: &configv1alpha1.RequestStatus{Code: http.StatusOK}, + }, + }, + rec: httptest.NewRecorder(), + mockHandler: &HTTPMockHandler{}, + decoder: &Decoder{}, + verify: verifyResourceInterpreterResponse, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "test-uid", + Successful: true, + Status: &configv1alpha1.RequestStatus{ + Message: "", + Code: http.StatusOK, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + webhook := NewWebhook(test.mockHandler, test.decoder) + webhook.writeResponse(test.rec, test.res) + if err := test.verify(test.rec.Body.Bytes(), test.want); err != nil { + t.Errorf("failed to verify resource interpreter response: %v", err) + } + }) + } +} + +func TestWriteResourceInterpreterResponse(t *testing.T) { + tests := []struct { + name string + mockHandler *HTTPMockHandler + rec io.Writer + res configv1alpha1.ResourceInterpreterContext + verify func(io.Writer, *configv1alpha1.ResourceInterpreterResponse) error + want *configv1alpha1.ResourceInterpreterResponse + }{ + { + name: "WriteResourceInterpreterResponse_ValidValues_WriteIsSuccessful", + mockHandler: &HTTPMockHandler{}, + rec: httptest.NewRecorder(), + res: configv1alpha1.ResourceInterpreterContext{ + Response: &configv1alpha1.ResourceInterpreterResponse{ + UID: "test-uid", + Successful: true, + Status: &configv1alpha1.RequestStatus{Code: http.StatusOK}, + }, + }, + verify: func(writer io.Writer, rir *configv1alpha1.ResourceInterpreterResponse) error { + data, ok := writer.(*httptest.ResponseRecorder) + if !ok { + return fmt.Errorf("expected writer of type httptest.ResponseRecorder but got %T", writer) + } + return verifyResourceInterpreterResponse(data.Body.Bytes(), rir) + }, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "test-uid", + Successful: true, + Status: &configv1alpha1.RequestStatus{ + Message: "", + Code: http.StatusOK, + }, + }, + }, + { + name: "WriteResourceInterpreterResponse_FailedToWrite_WriterReachedMaxFailures", + mockHandler: &HTTPMockHandler{}, + res: configv1alpha1.ResourceInterpreterContext{ + Response: &configv1alpha1.ResourceInterpreterResponse{}, + }, + rec: &limitedBadResponseWriter{maxFailures: 3}, + verify: func(writer io.Writer, _ *configv1alpha1.ResourceInterpreterResponse) error { + data, ok := writer.(*limitedBadResponseWriter) + if !ok { + return fmt.Errorf("expected writer of type limitedBadResponseWriter but got %T", writer) + } + if data.failCount != data.maxFailures { + return fmt.Errorf("expected %d write failures, got %d", data.maxFailures, data.failCount) + } + return nil + }, + }, + } + for _, 
test := range tests { + t.Run(test.name, func(t *testing.T) { + webhook := NewWebhook(test.mockHandler, &Decoder{}) + webhook.writeResourceInterpreterResponse(test.rec, test.res) + if err := test.verify(test.rec, test.want); err != nil { + t.Errorf("failed to verify resource interpreter response: %v", err) + } + }) + } +} + +// verifyResourceInterpreterResponse unmarshals the provided body into a +// ResourceInterpreterContext and verifies it matches the expected values in res2. +func verifyResourceInterpreterResponse(body []byte, res2 *configv1alpha1.ResourceInterpreterResponse) error { + var resContext configv1alpha1.ResourceInterpreterContext + if err := json.Unmarshal(body, &resContext); err != nil { + return fmt.Errorf("failed to unmarshal body: %v", err) + } + if resContext.Response.UID != res2.UID { + return fmt.Errorf("expected UID %s, but got %s", res2.UID, resContext.Response.UID) + } + if resContext.Response.Successful != res2.Successful { + return fmt.Errorf("expected success status %t, but got %t", res2.Successful, resContext.Response.Successful) + } + if !strings.Contains(resContext.Response.Status.Message, res2.Status.Message) { + return fmt.Errorf("expected message %s to be subset, but got %s", res2.Status.Message, resContext.Response.Status.Message) + } + if resContext.Response.Status.Code != res2.Status.Code { + return fmt.Errorf("expected status code %d, but got %d", res2.Status.Code, resContext.Response.Status.Code) + } + return nil +} diff --git a/pkg/webhook/interpreter/inject_test.go b/pkg/webhook/interpreter/inject_test.go new file mode 100644 index 000000000000..c61d30cf00db --- /dev/null +++ b/pkg/webhook/interpreter/inject_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package interpreter + +import ( + "testing" +) + +// MockDecoderInjector is a mock struct implementing the DecoderInjector interface for testing purposes. +type MockDecoderInjector struct { + decoder *Decoder +} + +// InjectDecoder implements the DecoderInjector interface by setting the decoder. 
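+// The decoder is stored so that TestInjectDecoder can check that InjectDecoderInto
+// handed over the expected instance.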
+func (m *MockDecoderInjector) InjectDecoder(decoder *Decoder) { + m.decoder = decoder +} + +func TestInjectDecoder(t *testing.T) { + tests := []struct { + name string + mockInjector interface{} + decoder *Decoder + wantToBeInjected bool + }{ + { + name: "InjectDecoder_ObjectImplementsDecoderInjector_Injected", + mockInjector: &MockDecoderInjector{}, + decoder: &Decoder{}, + wantToBeInjected: true, + }, + { + name: "InjectDecoder_ObjectNotImplementDecoderInjector_NotInjected", + mockInjector: struct{}{}, + decoder: &Decoder{}, + wantToBeInjected: false, + }, + { + name: "InjectDecoder_ObjectImplementsDecoderInjector_Injected", + mockInjector: &MockDecoderInjector{}, + wantToBeInjected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if got := InjectDecoderInto(test.decoder, test.mockInjector); got != test.wantToBeInjected { + t.Errorf("expected status injection to be %t, but got %t", test.wantToBeInjected, got) + } + if test.wantToBeInjected && test.mockInjector.(*MockDecoderInjector).decoder != test.decoder { + t.Errorf("failed to inject the correct decoder, expected %v but got %v", test.decoder, test.mockInjector.(*MockDecoderInjector).decoder) + } + }) + } +} diff --git a/pkg/webhook/interpreter/response_test.go b/pkg/webhook/interpreter/response_test.go new file mode 100644 index 000000000000..8a5aeebf979f --- /dev/null +++ b/pkg/webhook/interpreter/response_test.go @@ -0,0 +1,179 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package interpreter + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" + + "gomodules.xyz/jsonpatch/v2" + + configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" +) + +// customError is a helper struct for simulating errors. 
+type customError struct { + msg string +} + +func (e *customError) Error() string { + return e.msg +} + +func TestErrored(t *testing.T) { + err := &customError{"Test Error"} + code := int32(500) + response := Errored(code, err) + + expectedResponse := Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + Successful: false, + Status: &configv1alpha1.RequestStatus{ + Code: code, + Message: err.Error(), + }, + }, + } + + if !reflect.DeepEqual(expectedResponse, response) { + t.Errorf("response mismatch: expected %v, got %v", expectedResponse, response) + } +} + +func TestSucceeded(t *testing.T) { + message := "Operation succeeded" + response := Succeeded(message) + + expectedResponse := ValidationResponse(true, message) + + if !reflect.DeepEqual(expectedResponse, response) { + t.Errorf("response mismatch: expected %v, got %v", expectedResponse, response) + } +} + +func TestValidationResponse(t *testing.T) { + tests := []struct { + name string + msg string + isSuccessful bool + want Response + }{ + { + name: "ValidationResponse_IsSuccessful_Succeeded", + msg: "Success", + isSuccessful: true, + want: Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + Successful: true, + Status: &configv1alpha1.RequestStatus{ + Code: int32(http.StatusOK), + Message: "Success", + }, + }, + }, + }, + { + name: "ValidationResponse_IsFailed_Failed", + msg: "Failed", + isSuccessful: false, + want: Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + Successful: false, + Status: &configv1alpha1.RequestStatus{ + Code: int32(http.StatusForbidden), + Message: "Failed", + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + response := ValidationResponse(test.isSuccessful, test.msg) + if !reflect.DeepEqual(response, test.want) { + t.Errorf("expected response %v but got response %v", test.want, response) + } + }) + } +} + +func TestPatchResponseFromRaw(t *testing.T) { + tests := []struct { + name string + original, current []byte + expectedPatch []jsonpatch.Operation + res Response + want Response + prep func(wantRes *Response) error + }{ + { + name: "PatchResponseFromRaw_ReplacePatch_ReplacePatchExpected", + original: []byte(fmt.Sprintf(`{"name": "%s"}`, "original")), + current: []byte(fmt.Sprintf(`{"name": "%s"}`, "current")), + prep: func(wantRes *Response) error { + expectedPatch := []jsonpatch.Operation{ + { + Operation: "replace", + Path: "/name", + Value: "current", + }, + } + expectedPatchJSON, err := json.Marshal(expectedPatch) + if err != nil { + return fmt.Errorf("marshal failure: %v", err) + } + wantRes.ResourceInterpreterResponse.Patch = expectedPatchJSON + + return nil + }, + want: Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + Successful: true, + PatchType: func() *configv1alpha1.PatchType { pt := configv1alpha1.PatchTypeJSONPatch; return &pt }(), + }, + }, + }, + { + name: "PatchResponseFromRaw_OriginalSameAsCurrentValue_NoPatchExpected", + original: []byte(fmt.Sprintf(`{"name": "%s"}`, "same")), + current: []byte(fmt.Sprintf(`{"name": "%s"}`, "same")), + prep: func(*Response) error { return nil }, + want: Succeeded(""), + }, + { + name: "PatchResponseFromRaw_InvalidJSONDocument_JSONDocumentIsInvalid", + original: []byte(nil), + current: []byte("updated"), + prep: func(*Response) error { return nil }, + want: Errored(http.StatusInternalServerError, &customError{"invalid JSON Document"}), + }, + } + for _, test := range tests 
{
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(&test.want); err != nil {
+				t.Errorf("failed to prep for patching response from raw: %v", err)
+			}
+			patchResponse := PatchResponseFromRaw(test.original, test.current)
+			if !reflect.DeepEqual(patchResponse, test.want) {
+				t.Errorf("patch responses do not match; expected %v, but got %v", test.want, patchResponse)
+			}
+		})
+	}
+}
diff --git a/pkg/webhook/interpreter/webhook_test.go b/pkg/webhook/interpreter/webhook_test.go
new file mode 100644
index 000000000000..5b5e7e6712f1
--- /dev/null
+++ b/pkg/webhook/interpreter/webhook_test.go
@@ -0,0 +1,158 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package interpreter
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+
+	"k8s.io/apimachinery/pkg/types"
+
+	configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
+)
+
+// WebhookMockHandler implements the Handler and DecoderInjector interfaces for testing.
+type WebhookMockHandler struct {
+	response Response
+	decoder  *Decoder
+}
+
+// Handle implements the Handler interface for WebhookMockHandler.
+func (m *WebhookMockHandler) Handle(_ context.Context, _ Request) Response {
+	return m.response
+}
+
+// InjectDecoder implements the DecoderInjector interface by setting the decoder.
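+// The stored decoder records whatever is injected, so tests can assert on it when needed.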
+func (m *WebhookMockHandler) InjectDecoder(decoder *Decoder) { + m.decoder = decoder +} + +func TestNewWebhook(t *testing.T) { + mockHandler := &WebhookMockHandler{} + decoder := &Decoder{} + + webhook := NewWebhook(mockHandler, decoder) + if webhook == nil { + t.Fatalf("webhook returned by NewWebhook() is nil") + } + if webhook.handler != mockHandler { + t.Errorf("webhook has incorrect handler: expected %v, got %v", mockHandler, webhook.handler) + } +} + +func TestHandle(t *testing.T) { + var uid types.UID = "test-uid" + + expectedResponse := Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + Successful: true, + Status: &configv1alpha1.RequestStatus{ + Code: http.StatusOK, + }, + UID: uid, + }, + } + + mockHandler := &WebhookMockHandler{response: expectedResponse} + webhook := NewWebhook(mockHandler, &Decoder{}) + req := Request{ + ResourceInterpreterRequest: configv1alpha1.ResourceInterpreterRequest{ + UID: uid, + }, + } + + resp := webhook.Handle(context.TODO(), req) + if !reflect.DeepEqual(resp, expectedResponse) { + t.Errorf("response mismatch in Handle(): expected %v, got %v", expectedResponse, resp) + } + if resp.UID != req.UID { + t.Errorf("uid was not set as expected: expected %v, got %v", req.UID, resp.UID) + } +} + +func TestComplete(t *testing.T) { + tests := []struct { + name string + req Request + res Response + verify func(*Response, *Request) error + }{ + { + name: "TestComplete_StatusAndStatusCodeAreUnset_FieldsArePopulated", + req: Request{ + ResourceInterpreterRequest: configv1alpha1.ResourceInterpreterRequest{ + UID: "test-uid", + }, + }, + res: Response{}, + verify: verifyResourceInterpreterCompleteResponse, + }, + { + name: "TestComplete_OverrideResponseUIDAndStatusCode_ResponseUIDAndStatusCodeAreOverrided", + req: Request{ + ResourceInterpreterRequest: configv1alpha1.ResourceInterpreterRequest{ + UID: "test-uid", + }, + }, + res: Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + UID: "existing-uid", + Status: &configv1alpha1.RequestStatus{ + Code: http.StatusForbidden, + }, + }, + }, + verify: func(resp *Response, req *Request) error { + if resp.UID != req.UID { + return fmt.Errorf("uid should be overridden if it's already set in the request: expected %v, got %v", req.UID, resp.UID) + } + if resp.Status.Code != http.StatusForbidden { + return fmt.Errorf("status code should not be overridden if it's already set: expected %v, got %v", http.StatusForbidden, resp.Status.Code) + } + return nil + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.res.Complete(test.req) + if err := test.verify(&test.res, &test.req); err != nil { + t.Errorf("failed to verify complete resource interpreter response: %v", err) + } + }) + } +} + +// verifyResourceInterpreterCompleteResponse checks if the response from +// the resource interpreter's Complete method is valid. +// It ensures the response UID matches the request UID, the Status is initialized, +// and the Status code is set to http.StatusOK. Returns an error if any check fails. 
+func verifyResourceInterpreterCompleteResponse(res *Response, req *Request) error { + if res.UID != req.UID { + return fmt.Errorf("uid was not set as expected: expected %v, got %v", req.UID, res.UID) + } + if res.Status == nil { + return fmt.Errorf("status should be initialized if it's nil") + } + if res.Status.Code != http.StatusOK { + return fmt.Errorf("status code should be set to %v if it was 0, got %v", http.StatusOK, res.Status.Code) + } + return nil +} diff --git a/pkg/webhook/multiclusteringress/validating_test.go b/pkg/webhook/multiclusteringress/validating_test.go index 664c7496d861..91b21ecdd910 100644 --- a/pkg/webhook/multiclusteringress/validating_test.go +++ b/pkg/webhook/multiclusteringress/validating_test.go @@ -1,5 +1,5 @@ /* -Copyright 2023 The Karmada Authors. +Copyright 2024 The Karmada Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/webhook/overridepolicy/validating_test.go b/pkg/webhook/overridepolicy/validating_test.go index cf299f83d60a..43a9c6bc3c48 100644 --- a/pkg/webhook/overridepolicy/validating_test.go +++ b/pkg/webhook/overridepolicy/validating_test.go @@ -24,6 +24,7 @@ import ( "strings" "testing" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -154,6 +155,241 @@ func TestValidatingAdmission_Handle(t *testing.T) { Message: "", }, }, + { + name: "Handle_FieldOverrider_ContainsBothYAMLAndJSON_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + {APIVersion: "test-apiversion", Kind: "test"}, + }, + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + TargetCluster: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"member1"}, + }, + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/config", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/db-config", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`{"db": "new"}`)}, + }, + }, + YAML: []policyv1alpha1.YAMLPatchOperation{ + { + SubPath: "/db-config", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte("db: new")}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "FieldOverrider has both YAML and JSON set. 
Only one is allowed", + }, + }, + { + name: "Handle_InvalidFieldPathInYAML_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "invalidPath", + YAML: []policyv1alpha1.YAMLPatchOperation{ + { + SubPath: "/db-config", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte("db: new")}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.overrideRules[0].overriders.fieldOverrider[0].fieldPath: Invalid value: \"invalidPath\": JSON pointer must be empty or start with a \"/", + }, + }, + { + name: "Handle_InvalidJSONSubPath_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/config", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "invalidSubPath", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`{"db": "new"}`)}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.overrideRules[0].overriders.fieldOverrider[0].json[0].subPath: Invalid value: \"invalidSubPath\": JSON pointer must be empty or start with a \"/", + }, + }, + { + name: "Handle_InvalidYAMLSubPath_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/config", + YAML: []policyv1alpha1.YAMLPatchOperation{ + { + SubPath: "invalidSubPath", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte("db: new")}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.overrideRules[0].overriders.fieldOverrider[0].yaml[0].subPath: Invalid value: \"invalidSubPath\": JSON pointer must be empty or start with a \"/", + }, + }, + { + name: "Handle_InvalidJSONValue_DeniesAdmission_OverriderOpReplace", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/config", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/db-config", + Operator: policyv1alpha1.OverriderOpReplace, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.overrideRules[0].overriders.fieldOverrider[0].json[0].value: Invalid value: v1.JSON{Raw:[]uint8(nil)}: value is required for add or replace operation", + }, + }, + { + name: "Handle_InvalidJSONValue_DeniesAdmission_OverriderOpAdd", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + Overriders: policyv1alpha1.Overriders{ + 
FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/config", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/db-config", + Operator: policyv1alpha1.OverriderOpAdd, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.overrideRules[0].overriders.fieldOverrider[0].json[0].value: Invalid value: v1.JSON{Raw:[]uint8(nil)}: value is required for add or replace operation", + }, + }, + { + name: "Handle_InvalidJSONValue_DeniesAdmission_OverriderOpRemove", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/config", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/db-config", + Operator: policyv1alpha1.OverriderOpRemove, + Value: apiextensionsv1.JSON{Raw: []byte(`{"db": "new"}`)}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.overrideRules[0].overriders.fieldOverrider[0].json[0].value: Invalid value: v1.JSON{Raw:[]uint8{0x7b, 0x22, 0x64, 0x62, 0x22, 0x3a, 0x20, 0x22, 0x6e, 0x65, 0x77, 0x22, 0x7d}}: value is not allowed for remove operation", + }, + }, } for _, tt := range tests { diff --git a/test/e2e/coverage_docs/overridepolicy_test.md b/test/e2e/coverage_docs/overridepolicy_test.md index 042b93564a97..9de4380e37c0 100644 --- a/test/e2e/coverage_docs/overridepolicy_test.md +++ b/test/e2e/coverage_docs/overridepolicy_test.md @@ -8,6 +8,8 @@ | Check if the OverridePolicy will update the deployment's image value | deployment imageOverride testing | | | Check if the OverridePolicy will update the pod's image value | pod imageOverride testing | | | Check if the OverridePolicy will update the specific image value | deployment imageOverride testing | | +| Check if the OverridePolicy will update the value inside JSON | deployment fieldOverride testing | | +| Check if the OverridePolicy will update the value inside YAML | deployment fieldOverride testing | | #### OverridePolicy with nil resourceSelector testing | Test Case | E2E Describe Text | Comments | diff --git a/test/e2e/overridepolicy_test.go b/test/e2e/overridepolicy_test.go index a7f8be920708..663e4951d79e 100644 --- a/test/e2e/overridepolicy_test.go +++ b/test/e2e/overridepolicy_test.go @@ -17,9 +17,13 @@ limitations under the License. 
package e2e import ( + "fmt" + "strings" + "github.com/onsi/ginkgo/v2" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/klog/v2" @@ -425,6 +429,221 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() { }) }) }) + + ginkgo.Context("[FieldOverrider] apply field overrider testing to update JSON values in ConfigMap", func() { + var configMapNamespace, configMapName string + var configMap *corev1.ConfigMap + + ginkgo.BeforeEach(func() { + configMapNamespace = testNamespace + configMapName = configMapNamePrefix + rand.String(RandomStrLength) + propagationPolicyNamespace = testNamespace + propagationPolicyName = configMapName + overridePolicyNamespace = testNamespace + overridePolicyName = configMapName + + configMapData := map[string]string{ + "deploy.json": fmt.Sprintf(`{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "nginx-deploy", + "namespace": "%s" + }, + "spec": { + "replicas": 3, + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [ + { + "name": "nginx", + "image": "nginx:1.19.0" + } + ] + } + } + } + }`, configMapNamespace), + } + + configMap = helper.NewConfigMap(configMapNamespace, configMapName, configMapData) + propagationPolicy = helper.NewPropagationPolicy(propagationPolicyNamespace, propagationPolicyName, []policyv1alpha1.ResourceSelector{ + { + APIVersion: configMap.APIVersion, + Kind: configMap.Kind, + Name: configMap.Name, + }, + }, policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: framework.ClusterNames(), + }, + }) + + overridePolicy = helper.NewOverridePolicy(overridePolicyNamespace, overridePolicyName, []policyv1alpha1.ResourceSelector{ + { + APIVersion: configMap.APIVersion, + Kind: configMap.Kind, + Name: configMap.Name, + }, + }, policyv1alpha1.ClusterAffinity{ + ClusterNames: framework.ClusterNames(), + }, policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/deploy.json", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/spec/replicas", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`5`)}, + }, + { + SubPath: "/spec/template/spec/containers/-", + Operator: policyv1alpha1.OverriderOpAdd, + Value: apiextensionsv1.JSON{Raw: []byte(`{"name": "nginx-helper", "image": "nginx:1.19.1"}`)}, + }, + { + SubPath: "/spec/template/spec/containers/0/image", + Operator: policyv1alpha1.OverriderOpRemove, + }, + }, + }, + }, + }) + }) + + ginkgo.BeforeEach(func() { + framework.CreatePropagationPolicy(karmadaClient, propagationPolicy) + framework.CreateOverridePolicy(karmadaClient, overridePolicy) + framework.CreateConfigMap(kubeClient, configMap) + ginkgo.DeferCleanup(func() { + framework.RemovePropagationPolicy(karmadaClient, propagationPolicy.Namespace, propagationPolicy.Name) + framework.RemoveOverridePolicy(karmadaClient, overridePolicy.Namespace, overridePolicy.Name) + framework.RemoveConfigMap(kubeClient, configMap.Namespace, configMap.Name) + }) + }) + + ginkgo.It("should override JSON field in ConfigMap", func() { + klog.Infof("check if configMap present on member clusters has the correct JSON field value.") + framework.WaitConfigMapPresentOnClustersFitWith(framework.ClusterNames(), configMap.Namespace, configMap.Name, + func(cm 
*corev1.ConfigMap) bool { + return strings.Contains(cm.Data["deploy.json"], `"replicas":5`) && + strings.Contains(cm.Data["deploy.json"], `"name":"nginx-helper"`) && + !strings.Contains(cm.Data["deploy.json"], `"image":"nginx:1.19.0"`) + }) + }) + }) + + ginkgo.Context("[FieldOverrider] apply field overrider testing to update YAML values in ConfigMap", func() { + var configMapNamespace, configMapName string + var configMap *corev1.ConfigMap + + ginkgo.BeforeEach(func() { + configMapNamespace = testNamespace + configMapName = configMapNamePrefix + rand.String(RandomStrLength) + propagationPolicyNamespace = testNamespace + propagationPolicyName = configMapName + overridePolicyNamespace = testNamespace + overridePolicyName = configMapName + + // Define the ConfigMap data + configMapData := map[string]string{ + "nginx.yaml": ` +server: + listen: 80 + server_name: localhost + location /: + root: /usr/share/nginx/html + index: + - index.html + - index.htm + error_page: + - code: 500 + - code: 502 + - code: 503 + - code: 504 + location /50x.html: + root: /usr/share/nginx/html +`, + } + configMap = helper.NewConfigMap(configMapNamespace, configMapName, configMapData) + propagationPolicy = helper.NewPropagationPolicy(propagationPolicyNamespace, propagationPolicyName, []policyv1alpha1.ResourceSelector{ + { + APIVersion: configMap.APIVersion, + Kind: configMap.Kind, + Name: configMap.Name, + }, + }, policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: framework.ClusterNames(), + }, + }) + + overridePolicy = helper.NewOverridePolicy(overridePolicyNamespace, overridePolicyName, []policyv1alpha1.ResourceSelector{ + { + APIVersion: configMap.APIVersion, + Kind: configMap.Kind, + Name: configMap.Name, + }, + }, policyv1alpha1.ClusterAffinity{ + ClusterNames: framework.ClusterNames(), + }, policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/nginx.yaml", + YAML: []policyv1alpha1.YAMLPatchOperation{ + { + SubPath: "/server/location ~1/root", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`"/var/www/html"`)}, + }, + { + SubPath: "/server/error_page/-", + Operator: policyv1alpha1.OverriderOpAdd, + Value: apiextensionsv1.JSON{Raw: []byte(`{"code": 400}`)}, + }, + { + SubPath: "/server/location ~1/index", + Operator: policyv1alpha1.OverriderOpRemove, + }, + }, + }, + }, + }) + }) + + ginkgo.BeforeEach(func() { + framework.CreatePropagationPolicy(karmadaClient, propagationPolicy) + framework.CreateOverridePolicy(karmadaClient, overridePolicy) + framework.CreateConfigMap(kubeClient, configMap) + ginkgo.DeferCleanup(func() { + framework.RemovePropagationPolicy(karmadaClient, propagationPolicy.Namespace, propagationPolicy.Name) + framework.RemoveOverridePolicy(karmadaClient, overridePolicy.Namespace, overridePolicy.Name) + framework.RemoveConfigMap(kubeClient, configMap.Namespace, configMap.Name) + }) + }) + + ginkgo.It("should override YAML field in ConfigMap", func() { + klog.Infof("check if configMap present on member clusters has the correct YAML field value.") + framework.WaitConfigMapPresentOnClustersFitWith(framework.ClusterNames(), configMap.Namespace, configMap.Name, + func(cm *corev1.ConfigMap) bool { + return strings.Contains(cm.Data["nginx.yaml"], "root: /var/www/html") && + strings.Contains(cm.Data["nginx.yaml"], "code: 400") && + !strings.Contains(cm.Data["nginx.yaml"], "- index.html") + }) + }) + }) + }) var _ = framework.SerialDescribe("OverridePolicy with nil 
resourceSelector testing", func() {