diff --git a/.github/workflows/ci-build.yaml b/.github/workflows/ci-build.yaml index 622710178370e..8a7d01be8e2a3 100644 --- a/.github/workflows/ci-build.yaml +++ b/.github/workflows/ci-build.yaml @@ -28,7 +28,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Setup Golang uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0 with: @@ -46,7 +46,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Setup Golang uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0 with: @@ -70,13 +70,13 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Setup Golang uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0 with: go-version: ${{ env.GOLANG_VERSION }} - name: Run golangci-lint - uses: golangci/golangci-lint-action@639cd343e1d3b897ff35927a75193d57cfcba299 # v3.6.0 + uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5 # v3.4.0 with: version: v1.51.0 args: --timeout 10m --exclude SA5011 --verbose @@ -93,7 +93,7 @@ jobs: - name: Create checkout directory run: mkdir -p ~/go/src/github.com/argoproj - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Create symlink in GOPATH run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd - name: Setup Golang @@ -149,7 +149,7 @@ jobs: path: test-results/ test-go-race: - name: Run unit tests with -race for Go packages + name: Run unit tests 
with -race, for Go packages runs-on: ubuntu-22.04 needs: - build-go @@ -160,7 +160,7 @@ jobs: - name: Create checkout directory run: mkdir -p ~/go/src/github.com/argoproj - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Create symlink in GOPATH run: ln -s $(pwd) ~/go/src/github.com/argoproj/argo-cd - name: Setup Golang @@ -215,7 +215,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Setup Golang uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0 with: @@ -263,7 +263,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Setup NodeJS uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c # v3.6.0 with: @@ -300,7 +300,7 @@ jobs: sonar_secret: ${{ secrets.SONAR_TOKEN }} steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 with: fetch-depth: 0 - name: Restore node dependency cache @@ -379,7 +379,7 @@ jobs: GITLAB_TOKEN: ${{ secrets.E2E_TEST_GITLAB_TOKEN }} steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Setup Golang uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.0 with: diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 284484e36d695..54ee690ceb378 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -30,7 +30,7 @@ jobs: steps: - name: 
Checkout repository - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL diff --git a/.github/workflows/image-reuse.yaml b/.github/workflows/image-reuse.yaml index d5232e148c037..d36e0170ee3d3 100644 --- a/.github/workflows/image-reuse.yaml +++ b/.github/workflows/image-reuse.yaml @@ -58,14 +58,14 @@ jobs: image-digest: ${{ steps.image.outputs.digest }} steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.3.0 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.3.0 with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} if: ${{ github.ref_type == 'tag'}} - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.3.0 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.3.0 if: ${{ github.ref_type != 'tag'}} - name: Setup Golang @@ -78,8 +78,8 @@ jobs: with: cosign-release: 'v2.0.0' - - uses: docker/setup-qemu-action@2b82ce82d56a2a04d2637cd93a637ae1b359c0a7 # v2.2.0 - - uses: docker/setup-buildx-action@ecf95283f03858871ff00b787d79c419715afc34 # v2.7.0 + - uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # v2.1.0 + - uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c # v2.5.0 - name: Setup tags for container image as a CSV type run: | @@ -106,7 +106,7 @@ jobs: echo 'EOF' >> $GITHUB_ENV - name: Login to Quay.io - uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 + uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0 with: registry: quay.io username: ${{ secrets.quay_username }} @@ -114,7 +114,7 @@ jobs: if: ${{ inputs.quay_image_name && inputs.push }} - name: Login to GitHub Container Registry - uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 + uses: 
docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0 with: registry: ghcr.io username: ${{ secrets.ghcr_username }} @@ -122,7 +122,7 @@ jobs: if: ${{ inputs.ghcr_image_name && inputs.push }} - name: Login to dockerhub Container Registry - uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 + uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a # v2.1.0 with: username: ${{ secrets.docker_username }} password: ${{ secrets.docker_password }} @@ -130,7 +130,7 @@ jobs: - name: Build and push container image id: image - uses: docker/build-push-action@2eb1c1961a95fc15694676618e422e8ba1d63825 #v4.1.1 + uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 #v4.0.0 with: context: . platforms: ${{ inputs.platforms }} diff --git a/.github/workflows/image.yaml b/.github/workflows/image.yaml index e98ae27708263..0a8898777cea0 100644 --- a/.github/workflows/image.yaml +++ b/.github/workflows/image.yaml @@ -25,7 +25,7 @@ jobs: image-tag: ${{ steps.image.outputs.tag}} platforms: ${{ steps.platforms.outputs.platforms }} steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 - name: Set image tag for ghcr run: echo "tag=$(cat ./VERSION)-${GITHUB_SHA::8}" >> $GITHUB_OUTPUT @@ -86,7 +86,7 @@ jobs: packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues) if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }} # Must be refernced by a tag. 
https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator - uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0 + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.6.0 with: image: ghcr.io/argoproj/argo-cd/argocd digest: ${{ needs.build-and-publish.outputs.image-digest }} @@ -104,7 +104,7 @@ jobs: if: ${{ github.repository == 'argoproj/argo-cd' && github.event_name == 'push' }} runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.3.0 + - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.3.0 - run: git clone "https://$TOKEN@github.com/argoproj/argoproj-deployments" env: TOKEN: ${{ secrets.TOKEN }} diff --git a/.github/workflows/init-release.yaml b/.github/workflows/init-release.yaml index 6881dc379aaa4..ab62e3b69cb1a 100644 --- a/.github/workflows/init-release.yaml +++ b/.github/workflows/init-release.yaml @@ -23,7 +23,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.2.0 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.2.0 with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} @@ -57,7 +57,7 @@ jobs: git diff - name: Create pull request - uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2 + uses: peter-evans/create-pull-request@284f54f989303d2699d373481a0cfa13ad5a6666 # v5.0.1 with: commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}" title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch" diff --git a/.github/workflows/pr-title-check.yml b/.github/workflows/pr-title-check.yml index 020535d7b8afa..d6dabc96b8c0b 100644 --- a/.github/workflows/pr-title-check.yml +++ b/.github/workflows/pr-title-check.yml @@ -23,7 +23,7 @@ jobs: name: Validate PR Title 
runs-on: ubuntu-latest steps: - - uses: thehanimo/pr-title-checker@0cf5902181e78341bb97bb06646396e5bd354b3f # v1.4.0 + - uses: thehanimo/pr-title-checker@cdafc664bf9b25678d4e6df76ff67b2fe21bb5d2 # v1.3.7 with: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} configuration_path: ".github/pr-title-checker-config.json" diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 92e7452334c27..1baa2a42a7cb4 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -38,7 +38,7 @@ jobs: packages: write # for uploading attestations. (https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#known-issues) # Must be refernced by a tag. https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator if: github.repository == 'argoproj/argo-cd' - uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.7.0 + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.6.0 with: image: quay.io/argoproj/argocd digest: ${{ needs.argocd-image.outputs.image-digest }} @@ -59,7 +59,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} @@ -88,7 +88,7 @@ jobs: echo "GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)" >> $GITHUB_ENV - name: Run GoReleaser - uses: goreleaser/goreleaser-action@336e29918d653399e599bfca99fadc1d7ffbc9f7 # v4.3.0 + uses: goreleaser/goreleaser-action@f82d6c1c344bcacabba2c841718984797f664a6b # v4.2.0 id: run-goreleaser with: version: latest @@ -120,7 +120,7 @@ jobs: contents: write # Needed for release uploads if: github.repository == 'argoproj/argo-cd' # Must be refernced by a tag. 
https://github.com/slsa-framework/slsa-github-generator/blob/main/internal/builders/container/README.md#referencing-the-slsa-generator - uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.7.0 + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.6.0 with: base64-subjects: "${{ needs.goreleaser.outputs.hashes }}" provenance-name: "argocd-cli.intoto.jsonl" @@ -138,7 +138,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.2.0 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.2.0 with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} @@ -211,7 +211,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.2.0 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.2.0 with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} @@ -270,7 +270,7 @@ jobs: if: ${{ env.UPDATE_VERSION == 'true' }} - name: Create PR to update VERSION on master branch - uses: peter-evans/create-pull-request@153407881ec5c347639a548ade7d8ad1d6740e38 # v5.0.2 + uses: peter-evans/create-pull-request@284f54f989303d2699d373481a0cfa13ad5a6666 # v5.0.1 with: commit-message: Bump version in master title: "chore: Bump version in master" diff --git a/.github/workflows/scorecard.yaml b/.github/workflows/scorecard.yaml index 3fdfdb4a3e8c6..1f1a8c5b81a5b 100644 --- a/.github/workflows/scorecard.yaml +++ b/.github/workflows/scorecard.yaml @@ -30,7 +30,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 with: persist-credentials: false diff --git a/.github/workflows/update-snyk.yaml b/.github/workflows/update-snyk.yaml index ca004f398df60..9e09a1d555b05 100644 --- a/.github/workflows/update-snyk.yaml +++ 
b/.github/workflows/update-snyk.yaml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout code - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2 with: token: ${{ secrets.GITHUB_TOKEN }} - name: Build reports diff --git a/USERS.md b/USERS.md index cf455d2ab87e7..f02187afc2fee 100644 --- a/USERS.md +++ b/USERS.md @@ -155,7 +155,6 @@ Currently, the following organizations are **officially** using Argo CD: 1. [Max Kelsen](https://www.maxkelsen.com/) 1. [MeDirect](https://medirect.com.mt/) 1. [Meican](https://meican.com/) -1. [Meilleurs Agents](https://www.meilleursagents.com/) 1. [Mercedes-Benz Tech Innovation](https://www.mercedes-benz-techinnovation.com/) 1. [Metanet](http://www.metanet.co.kr/en/) 1. [MindSpore](https://mindspore.cn) diff --git a/cmd/argocd/commands/app_test.go b/cmd/argocd/commands/app_test.go index 808efa5ad57da..769f6a12f9fda 100644 --- a/cmd/argocd/commands/app_test.go +++ b/cmd/argocd/commands/app_test.go @@ -1,6 +1,7 @@ package commands import ( + "context" "fmt" "os" "testing" @@ -13,7 +14,9 @@ import ( "github.com/argoproj/gitops-engine/pkg/utils/kube" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "github.com/spf13/cobra" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/intstr" @@ -1518,3 +1521,153 @@ func testApp(name, project string, labels map[string]string, annotations map[str }, } } + +type MockPodLogsServer struct { + mock.Mock +} + +type MockLogEntry struct { + Content *string + TimeStamp *time.Time + Last *bool + TimeStampStr *string + PodName *string +} + +func (m *MockPodLogsServer) Send(entry *MockLogEntry) error { + args := m.Called(entry) + return args.Error(0) +} + +func (m *MockPodLogsServer) Context() context.Context { + args := 
m.Called() + return args.Get(0).(context.Context) +} + +func NewApplicationLogsTestCommand() *cobra.Command { + var command = &cobra.Command{ + Use: "logs", + Short: "logs", + RunE: func(cmd *cobra.Command, args []string) error { + return nil + }, + } + return command +} + +func TestNewApplicationLogsCommand_BasicLogStreaming(t *testing.T) { + mockServer := new(MockPodLogsServer) + mockServer.On("Context").Return(context.Background()) + + now := time.Now() + content := "test" + timeStamp := &now + last := false + timeStampStr := "2020-01-01" + podName := "pod-1" + + logEntry := &MockLogEntry{ + Content: &content, + TimeStamp: timeStamp, + Last: &last, + TimeStampStr: &timeStampStr, + PodName: &podName, + } + + mockServer.On("Send", logEntry).Return(nil) + + cmd := &cobra.Command{} + cmd.Flags().String("app", "my-app", "Application name") + cmd.Flags().String("namespace", "my-namespace", "Namespace") + cmd.Flags().String("pod", "pod-1", "Pod name") + + appLogsCmd := NewApplicationLogsTestCommand() + err := appLogsCmd.RunE(cmd, []string{}) + + assert.NoError(t, err) +} + +func TestNewApplicationLogsCommand_TimeBasedFilter(t *testing.T) { + mockServer := new(MockPodLogsServer) + mockServer.On("Context").Return(context.Background()) + + now := time.Now() + content := "test" + timeStamp := &now + last := false + timeStampStr := "2023-06-12 10:00:00" + podName := "pod-1" + + logEntry := &MockLogEntry{ + Content: &content, + TimeStamp: timeStamp, + Last: &last, + TimeStampStr: &timeStampStr, + PodName: &podName, + } + + mockServer.On("Send", logEntry).Return(nil) + + cmd := &cobra.Command{} + cmd.Flags().String("app", "my-app", "Application name") + cmd.Flags().String("namespace", "my-namespace", "Namespace") + cmd.Flags().String("pod", "pod-1", "Pod name") + cmd.Flags().String("since-time", "2023-06-12T09:00:00Z", "Logs since the specified time") + + appLogsCmd := NewApplicationLogsTestCommand() + err := appLogsCmd.RunE(cmd, []string{}) + + assert.NoError(t, err) +} + 
+func TestNewApplicationLogsCommand_TailingAndFiltering(t *testing.T) { + mockServer := new(MockPodLogsServer) + mockServer.On("Context").Return(context.Background()) + + now := time.Now() + content := "test" + timeStamp := &now + last := false + timeStampStr := "2023-06-12 10:00:00" + podName := "pod-1" + + logEntry := &MockLogEntry{ + Content: &content, + TimeStamp: timeStamp, + Last: &last, + TimeStampStr: &timeStampStr, + PodName: &podName, + } + + mockServer.On("Send", logEntry).Return(nil) + + cmd := &cobra.Command{} + cmd.Flags().String("app", "my-app", "Application name") + cmd.Flags().String("namespace", "my-namespace", "Namespace") + cmd.Flags().String("pod", "pod-1", "Pod name") + cmd.Flags().Int("tail", 10, "Number of lines to tail from logs") + cmd.Flags().String("filter", "ERROR", "Filter logs based on the specified string") + + appLogsCmd := NewApplicationLogsTestCommand() + err := appLogsCmd.RunE(cmd, []string{}) + + assert.NoError(t, err) +} + +func TestNewApplicationLogsCommand_NonExistentAppOrPod(t *testing.T) { + cmd := &cobra.Command{} + cmd.Flags().String("app", "non-existent-app", "Non-existent application name") + cmd.Flags().String("namespace", "my-namespace", "Namespace") + cmd.Flags().String("pod", "", "Pod name") + + appLogsCmd := NewApplicationLogsTestCommand() + + appLogsCmd.RunE = func(cmd *cobra.Command, args []string) error { + return fmt.Errorf("Failed to get logs for non-existent application or pod") + } + + err := appLogsCmd.RunE(cmd, []string{}) + + expectedError := fmt.Errorf("Failed to get logs for non-existent application or pod") + assert.EqualError(t, err, expectedError.Error(), "Expected an error for non-existent application or pod") +} diff --git a/docs/proposals/002-ui-extensions.md b/docs/proposals/002-ui-extensions.md index 583888da68c66..8fa02d25fd11c 100644 --- a/docs/proposals/002-ui-extensions.md +++ b/docs/proposals/002-ui-extensions.md @@ -63,7 +63,7 @@ As an operator, I would like to configure Argo CD to perform 
pre-defined actions ## Proposal -A new `ArgoCDExtension` CRD would be introduced which will allow operators configure Argo CD to understand how to handle and visualize custom resources. Visualizing a object requires javascript to render the object, and health/actions require lua scripts. As such, the extension CR would need to point to some location where the javascript/lua code would be hosted. +A new `ArgoCDExtension` CRD would be introduced which will allow operators configure Argo CD to understand how to handle and visualize custom resources. Visualizing a object requires javascript to render the object, and health/actions require lua scripts. As such, the extension CR would need to point to some location where the javascript/lua code would be hosted. It is proposed that a git repository be used to contain the javascript code, as well as the lua scripts necessary to assess health or perform actions of a resource. diff --git a/docs/proposals/rebalancing-clusters-across-shards-dynamically.md b/docs/proposals/rebalancing-clusters-across-shards-dynamically.md deleted file mode 100644 index 63ed973004cf5..0000000000000 --- a/docs/proposals/rebalancing-clusters-across-shards-dynamically.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: Neat-enhancement-idea -authors: - - "@ishitasequeira" # Authors' github accounts here. -sponsors: - - TBD # List all interested parties here. -reviewers: - - "@alexmt" - - TBD -approvers: - - "@alexmt" - - TBD - -creation-date: yyyy-mm-dd -last-updated: yyyy-mm-dd ---- - -# Neat Enhancement Idea - -Rebalance clusters across shards automatically on changes to the number of available shards. - - -## Open Questions [optional] - -This is where to call out areas of the design that require closure before deciding to implement the -design. - - -## Summary - -Current implementation of sharding uses StatefulSet for the application controller and the goal is to move towards an agile stateless Deployment. 
Although the application controller does not have any state to preserve, stateful sets were used to get predictable hostnames and the serial number in the hostname was used to get the shard id of a particular instance. Using StatefulSet has the following limitations: - -Any change done to the StatefulSet would cause all the child pods to restart in a serial fashion. This makes scaling up/down of the application controller slow as even existing healthy instances need to be restarted as well. -Scaling up or down happens one at a time. If there are 10 instances and if scaled to 20, then the scaling happens one at a time, causing considerable delay for the scaling to complete. - -Each shard replica knows about the total number of available shards by evaluating the environment variable ARGOCD_CONTROLLER_REPLICAS, which needs to be kept up-to-date with the actual number of available replicas (shards). If the number of replicas does not equal the number set in ARGOCD_CONTROLLER_REPLICAS, sharding will not work as intended, leading to both, unused and overused replicas. As this environment variable is set on the StatefulSet and propagated to the pods, all the pods in the StatefulSet need to be restarted in order to pick up the new number of total shards. - -The current sharding mechanism relies on predictable pod names for the application controller to determine which shard a given replica should impersonate, e.g. the first replica of the StatefulSet (argocd-application-controller-0) will be the first shard, the second replica (argocd-application-controller-1) will be the second and so forth. - -## Motivation - -If the number of available shards is changed (i.e. one or more application controller replicas are added or removed), all pods in the statefulset have to be restarted so that the managed clusters are redistributed over the available shards. 
Additionally, the application controller workload is deployed as a StatefulSet, which is not intended for dynamic horizontal scaling. - -### Goals - -- Improve the application controller’s ability to scale horizontally with a growing number of clusters -- Remove the need to run application controller as a StatefulSet workload - -### Non-Goals - -- Expand the scope of sharding to other assets than clusters (e.g. applications) -- Make a single shard highly available (e.g. by having 2 or more replicas by shard) - -## Proposal - -### Why use Deployments instead of StatefulSet: -StatefulSet is a Kubernetes resource that manages multiple pods that have unique identities, and are not interchangeable (unlike a regular Kubernetes Deployment, in which pods are stateless and can be destroyed and recreated as often as needed). - -Stateless applications scale horizontally very easily as compared to stateful applications due to the fact that infrastructure allows adding as many computing resources as needed. Changing the StatefulSet to Deployments for Application Controller will allow us to dynamically scale the replicas without restarting existing application controller pods. Also, the shard to application controller assignment would help in making sure the shards are scaled and distributed across the available healhty replicas of application controllers. - -### Distributing shards among Application Controllers: - -Inorder to be able to accurately know which shards are being managed by which application-controller, especially in scenarios of redistribution of load, addition/removal of `application controller`, etc., we would need to have a mechanism to assign clusters to the shards. - -In most scenarios, the service account used by the application controller has read access to all the resources in the cluster. 
Thus, instead of setting the environment variable ARGOCD_CONTROLLER_REPLICAS representing the number of replicas, the number of replicas can be read directly from the number of healthy replicas of the application controller deployment. - -For other scenarios, some users install controller with only `argocd-application-controller-role` role and use it to manage remote clusters only. In this case, we would need to update the `argocd-application-controller-role` role and allow controller inspect it's own deployment and find out the number of replicas. - -The application controllers will claim one of the available shards by checking which shard is not present in the ConfigMap or is assigned to an unhealthy controller. We will store the assignment list of Application Controller to Shard in ConfigMap. The mapping of Application Controller to Shard will store the below information: - -* Name/Id of the shard -* Name of the Application Controller currently managing the shard -* Last time of successful update to ConfigMap (Heartbeat) - -The mapping will be updated in ConfigMap every X (heartbeat interval) seconds with the help of heartbeat process performed by every application controller. If the heartbeat was not performed by the application controller for a certain time, the application controller is assumed to be unhealthy and the number of healthy/managed shards would be reduced, that is, the number of healthy replicas of the application controller deployment changes. - -The heartbeat interval will be a configurable parameter initialized while setting up the application controller. This way, users will be able to control the frequency at which they want the heartbeat process to take place. - -As part of the readiness probe, we will also add a check whether application controller was able to claim a shard successfully or not. If the shard claim failed, the readiness probe will fail marking the controller as unhealthy. 
Anytime the number of healthy replicas of application controllers is different from the number of application controllers to shard mappings, we would re-distribute the clusters among the healthy replicas again. We can summarize the above statement using the below formula: - -``` -Number of Replicas ≠Count of {Application Controller, Shard} mapping -``` - -The below logic can be used to perform application controller to shard assignment: - -1) If a new application controller is added, that is, a new shard is added, we would perform the re-distribution of clusters among the shards with the existing sharding algorithm being used. - -2) In scenarios when one of the application controllers is identified to be unhealthy, we will not trigger the re-ditribution of clusters across shards. The new instance of the application controller will claim this unassigned shard and start managing the shard. - -How will this work? -* The application controller will query the ConfigMap for the status of all the application controllers and last updated heartbeat timestamps. -* It will check if any application controller is flagged as Unhealthy or has not updated its status in ConfigMap during the heartbeat process for a certain period of time. -* If the status for an application controller was already flagged as Unhealthy, we will not re-trigger the redistribution of clusters across healthy shards. The new application controller will come online and try to claim this unassigned shard. -* If the status is not flagged and an application controller has not updated the last active timestamp in a long time, then we mark the Application Controller as Unhealthy and unassign the shard in the ConfigMap. - -*Note:* We will continue to use the cluster to shard assignment approach being used today. - -### Pros -* Every Application Controller would be able to take action on finding the distribution of load. 
-* Every Application Controller will monitor the status of Unhealthy shard and would be able to take action or flag for action. - -### Cons - -* ~~Possibility of race conditions while flagging the shard as Unhealthy during the heartbeat process. Although this can be handled using the [distributed locks](https://redis.io/docs/manual/patterns/distributed-locks/) in Redis.~~ -As we are using ConfigMap, this Con get's removed. Kubernetes would give conflict errors in case multiple edits are tried on the ConfigMap at the same time. We can leverage this error messages to avoid race conditions. - -* ~~In scenarios when Redis becomes unavailable, the heartbeat mechanism will pause working till the redis comes back online again. This will also pause the dynamic redistribution of clusters till Redis comes back online. The redistribution of clusters will be triggered again when Redis comes back online.~~ We would not see this issue by using ConfigMap instead of Redis. - - -### Security Considerations - -* This would be a breaking change of converting StatefulSets to Deployments. Any automation done by customers which is based on the assumption that the controller is modelled as a StatefulSet would break with this change. - -* ~~We would rely on Redis to store the current Application Controller to Shard mapping. In case the Redis is not available, it would not affect the regular working of ArgoCD. The dynamic distribution of clusters among healthy shards would stop working with the heartbeat process till Redis comes back up online, but the application controllers will continue managing their workloads.~~ We would not rely on Redis by using ConfigMap avoiding this issue. - - -### Upgrade / Downgrade Strategy - -* Working ArgoCD itself should not affected. An initial restart of all the application controller pods is expected when we switch from StatefulSet to Deployment or vice-versa. 
- -* There would be some initial delays in the reconciliation process during the transistion from StatefulSet to Deployment. If someone is not using sharding at all, they should not face any issues. - -## Alternatives - -An alternative approach would be to use Leader Election strategy. By implementing leader election, one of the healthy application controllers will be appointed as leader. The leader controller will be responsible for assigning clusters to the shards and balancing load across the shards. - -The leader controller will continue sending heartbeats to every replica controller and monitor the health of the controllers. In case one of the replica controllers crashes, the leader will distribute the shards managed by the unhealthy replica among the healthy replicas. - -If the leader goes down, the leader election process will be initiated among the healthy candidates and one of the candidates will be marked as leader who will perform the heartbeat process and redistribution of resources. - -One of the possible examples for selecting the leader is by checking the load handled by each healthy candidate and selecting the candidate which has the least load / number of resources running on it. - -### Pros of Leader Election - -* We can refrain from performing multiple calls to ConfigMap about the load and status of the shards and store it in a local cache within the leader while updating data in ConfigMap on a timely manner (for e.g. every 10 mins). -* Single leaders can easily offer clients consistency because they can see and control all the changes made to the state of the system. - - -### Cons of Leader Election -* A single leader is a single point of failure. If the leader becomes bad, that is, does not distribute clusters properly across shards, it is very difficult to identify or fix the bad behavior and can become a single point of failure -* A single leader means a single point of scaling, both in data size and request rate. 
When a leader-elected system needs to grow beyond a single leader, it requires a complete re-architecture. diff --git a/docs/snyk/index.md b/docs/snyk/index.md index 2083ce137778f..1e16212f16796 100644 --- a/docs/snyk/index.md +++ b/docs/snyk/index.md @@ -15,48 +15,48 @@ recent minor releases. |---:|:--------:|:----:|:------:|:---:| | [go.mod](master/argocd-test.html) | 0 | 0 | 0 | 0 | | [ui/yarn.lock](master/argocd-test.html) | 0 | 1 | 0 | 0 | -| [dex:v2.36.0](master/ghcr.io_dexidp_dex_v2.36.0.html) | 0 | 2 | 3 | 0 | -| [haproxy:2.6.12-alpine](master/haproxy_2.6.12-alpine.html) | 0 | 1 | 1 | 0 | -| [argocd:latest](master/quay.io_argoproj_argocd_latest.html) | 0 | 0 | 1 | 18 | -| [redis:7.0.11-alpine](master/redis_7.0.11-alpine.html) | 0 | 1 | 0 | 0 | +| [dex:v2.36.0](master/ghcr.io_dexidp_dex_v2.36.0.html) | 0 | 1 | 3 | 1 | +| [haproxy:2.6.12-alpine](master/haproxy_2.6.12-alpine.html) | 0 | 0 | 1 | 1 | +| [argocd:latest](master/quay.io_argoproj_argocd_latest.html) | 0 | 0 | 2 | 18 | +| [redis:7.0.11-alpine](master/redis_7.0.11-alpine.html) | 0 | 0 | 0 | 1 | | [install.yaml](master/argocd-iac-install.html) | - | - | - | - | | [namespace-install.yaml](master/argocd-iac-namespace-install.html) | - | - | - | - | -### v2.7.4 +### v2.7.3 | | Critical | High | Medium | Low | |---:|:--------:|:----:|:------:|:---:| -| [go.mod](v2.7.4/argocd-test.html) | 0 | 0 | 0 | 0 | -| [ui/yarn.lock](v2.7.4/argocd-test.html) | 0 | 0 | 0 | 0 | -| [dex:v2.36.0](v2.7.4/ghcr.io_dexidp_dex_v2.36.0.html) | 0 | 2 | 3 | 0 | -| [haproxy:2.6.12-alpine](v2.7.4/haproxy_2.6.12-alpine.html) | 0 | 1 | 1 | 0 | -| [argocd:v2.7.4](v2.7.4/quay.io_argoproj_argocd_v2.7.4.html) | 0 | 0 | 1 | 18 | -| [redis:7.0.11-alpine](v2.7.4/redis_7.0.11-alpine.html) | 0 | 1 | 0 | 0 | -| [install.yaml](v2.7.4/argocd-iac-install.html) | - | - | - | - | -| [namespace-install.yaml](v2.7.4/argocd-iac-namespace-install.html) | - | - | - | - | +| [go.mod](v2.7.3/argocd-test.html) | 0 | 0 | 0 | 0 | +| 
[ui/yarn.lock](v2.7.3/argocd-test.html) | 0 | 0 | 0 | 0 | +| [dex:v2.36.0](v2.7.3/ghcr.io_dexidp_dex_v2.36.0.html) | 0 | 1 | 3 | 1 | +| [haproxy:2.6.12-alpine](v2.7.3/haproxy_2.6.12-alpine.html) | 0 | 0 | 1 | 1 | +| [argocd:v2.7.3](v2.7.3/quay.io_argoproj_argocd_v2.7.3.html) | 0 | 0 | 3 | 19 | +| [redis:7.0.11-alpine](v2.7.3/redis_7.0.11-alpine.html) | 0 | 0 | 0 | 1 | +| [install.yaml](v2.7.3/argocd-iac-install.html) | - | - | - | - | +| [namespace-install.yaml](v2.7.3/argocd-iac-namespace-install.html) | - | - | - | - | -### v2.6.9 +### v2.6.8 | | Critical | High | Medium | Low | |---:|:--------:|:----:|:------:|:---:| -| [go.mod](v2.6.9/argocd-test.html) | 0 | 0 | 0 | 0 | -| [ui/yarn.lock](v2.6.9/argocd-test.html) | 0 | 0 | 0 | 0 | -| [dex:v2.36.0](v2.6.9/ghcr.io_dexidp_dex_v2.36.0.html) | 0 | 2 | 3 | 0 | -| [haproxy:2.6.12-alpine](v2.6.9/haproxy_2.6.12-alpine.html) | 0 | 1 | 1 | 0 | -| [argocd:v2.6.9](v2.6.9/quay.io_argoproj_argocd_v2.6.9.html) | 0 | 0 | 1 | 18 | -| [redis:7.0.11-alpine](v2.6.9/redis_7.0.11-alpine.html) | 0 | 1 | 0 | 0 | -| [install.yaml](v2.6.9/argocd-iac-install.html) | - | - | - | - | -| [namespace-install.yaml](v2.6.9/argocd-iac-namespace-install.html) | - | - | - | - | +| [go.mod](v2.6.8/argocd-test.html) | 0 | 0 | 0 | 0 | +| [ui/yarn.lock](v2.6.8/argocd-test.html) | 0 | 0 | 0 | 0 | +| [dex:v2.36.0](v2.6.8/ghcr.io_dexidp_dex_v2.36.0.html) | 0 | 1 | 3 | 1 | +| [haproxy:2.6.12-alpine](v2.6.8/haproxy_2.6.12-alpine.html) | 0 | 0 | 1 | 1 | +| [argocd:v2.6.8](v2.6.8/quay.io_argoproj_argocd_v2.6.8.html) | 0 | 0 | 3 | 19 | +| [redis:7.0.11-alpine](v2.6.8/redis_7.0.11-alpine.html) | 0 | 0 | 0 | 1 | +| [install.yaml](v2.6.8/argocd-iac-install.html) | - | - | - | - | +| [namespace-install.yaml](v2.6.8/argocd-iac-namespace-install.html) | - | - | - | - | -### v2.5.18 +### v2.5.17 | | Critical | High | Medium | Low | |---:|:--------:|:----:|:------:|:---:| -| [go.mod](v2.5.18/argocd-test.html) | 0 | 0 | 2 | 0 | -| 
[ui/yarn.lock](v2.5.18/argocd-test.html) | 0 | 0 | 4 | 0 | -| [dex:v2.36.0](v2.5.18/ghcr.io_dexidp_dex_v2.36.0.html) | 0 | 2 | 3 | 0 | -| [haproxy:2.6.12-alpine](v2.5.18/haproxy_2.6.12-alpine.html) | 0 | 1 | 1 | 0 | -| [argocd:v2.5.18](v2.5.18/quay.io_argoproj_argocd_v2.5.18.html) | 0 | 0 | 1 | 18 | -| [redis:7.0.11-alpine](v2.5.18/redis_7.0.11-alpine.html) | 0 | 1 | 0 | 0 | -| [install.yaml](v2.5.18/argocd-iac-install.html) | - | - | - | - | -| [namespace-install.yaml](v2.5.18/argocd-iac-namespace-install.html) | - | - | - | - | +| [go.mod](v2.5.17/argocd-test.html) | 0 | 0 | 2 | 0 | +| [ui/yarn.lock](v2.5.17/argocd-test.html) | 0 | 0 | 4 | 0 | +| [dex:v2.36.0](v2.5.17/ghcr.io_dexidp_dex_v2.36.0.html) | 0 | 1 | 3 | 1 | +| [haproxy:2.6.12-alpine](v2.5.17/haproxy_2.6.12-alpine.html) | 0 | 0 | 1 | 1 | +| [argocd:v2.5.17](v2.5.17/quay.io_argoproj_argocd_v2.5.17.html) | 0 | 0 | 3 | 19 | +| [redis:7.0.11-alpine](v2.5.17/redis_7.0.11-alpine.html) | 0 | 0 | 0 | 1 | +| [install.yaml](v2.5.17/argocd-iac-install.html) | - | - | - | - | +| [namespace-install.yaml](v2.5.17/argocd-iac-namespace-install.html) | - | - | - | - | diff --git a/docs/snyk/master/argocd-iac-install.html b/docs/snyk/master/argocd-iac-install.html index 2c9c5b2ca1223..3861107a41fba 100644 --- a/docs/snyk/master/argocd-iac-install.html +++ b/docs/snyk/master/argocd-iac-install.html @@ -456,7 +456,7 @@
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Alpine:3.17.
See How to fix? for Alpine:3.17 relevant fixed versions and status.
Issue summary: Processing some specially crafted ASN.1 object identifiers or - data containing them may be very slow.
-Impact summary: Applications that use OBJ_obj2txt() directly, or use any of - the OpenSSL subsystems OCSP, PKCS7/SMIME, CMS, CMP/CRMF or TS with no message - size limit may experience notable to very long delays when processing those - messages, which may lead to a Denial of Service.
-An OBJECT IDENTIFIER is composed of a series of numbers - sub-identifiers - - most of which have no size limit. OBJ_obj2txt() may be used to translate - an ASN.1 OBJECT IDENTIFIER given in DER encoding form (using the OpenSSL - type ASN1_OBJECT) to its canonical numeric text form, which are the - sub-identifiers of the OBJECT IDENTIFIER in decimal form, separated by - periods.
-When one of the sub-identifiers in the OBJECT IDENTIFIER is very large - (these are sizes that are seen as absurdly large, taking up tens or hundreds - of KiBs), the translation to a decimal number in text may take a very long - time. The time complexity is O(n^2) with 'n' being the size of the - sub-identifiers in bytes (*).
-With OpenSSL 3.0, support to fetch cryptographic algorithms using names / - identifiers in string form was introduced. This includes using OBJECT - IDENTIFIERs in canonical numeric text form as identifiers for fetching - algorithms.
-Such OBJECT IDENTIFIERs may be received through the ASN.1 structure - AlgorithmIdentifier, which is commonly used in multiple protocols to specify - what cryptographic algorithm should be used to sign or verify, encrypt or - decrypt, or digest passed data.
-Applications that call OBJ_obj2txt() directly with untrusted data are - affected, with any version of OpenSSL. If the use is for the mere purpose - of display, the severity is considered low.
-In OpenSSL 3.0 and newer, this affects the subsystems OCSP, PKCS7/SMIME, - CMS, CMP/CRMF or TS. It also impacts anything that processes X.509 - certificates, including simple things like verifying its signature.
-The impact on TLS is relatively low, because all versions of OpenSSL have a - 100KiB limit on the peer's certificate chain. Additionally, this only - impacts clients, or servers that have explicitly enabled client - authentication.
-In OpenSSL 1.1.1 and 1.0.2, this only affects displaying diverse objects, - such as X.509 certificates. This is assumed to not happen in such a way - that it would cause a Denial of Service, so these versions are considered - not affected by this issue in such a way that it would be cause for concern, - and the severity is therefore considered low.
+Applications that use a non-default option when verifying certificates may be + vulnerable to an attack from a malicious CA to circumvent certain checks.
+Invalid certificate policies in leaf certificates are silently ignored by + OpenSSL and other certificate policy checks are skipped for that certificate. + A malicious CA could use this to deliberately assert invalid certificate policies + in order to circumvent policy checking on the certificate altogether.
+Policy processing is disabled by default but can be enabled by passing
+ the -policy' argument to the command line utilities or by calling the X509_VERIFY_PARAM_set1_policies()' function.
Upgrade Alpine:3.17 openssl to version 3.0.9-r0 or higher.
Upgrade Alpine:3.17 openssl to version 3.0.8-r2 or higher.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Alpine:3.17.
See How to fix? for Alpine:3.17 relevant fixed versions and status.
Applications that use a non-default option when verifying certificates may be - vulnerable to an attack from a malicious CA to circumvent certain checks.
-Invalid certificate policies in leaf certificates are silently ignored by - OpenSSL and other certificate policy checks are skipped for that certificate. - A malicious CA could use this to deliberately assert invalid certificate policies - in order to circumvent policy checking on the certificate altogether.
-Policy processing is disabled by default but can be enabled by passing
- the -policy' argument to the command line utilities or by calling the X509_VERIFY_PARAM_set1_policies()' function.
The function X509_VERIFY_PARAM_add0_policy() is documented to + implicitly enable the certificate policy check when doing certificate + verification. However the implementation of the function does not + enable the check which allows certificates with invalid or incorrect + policies to pass the certificate verification.
+As suddenly enabling the policy check could break existing deployments it was + decided to keep the existing behavior of the X509_VERIFY_PARAM_add0_policy() + function.
+Instead the applications that require OpenSSL to perform certificate + policy check need to use X509_VERIFY_PARAM_set1_policies() or explicitly + enable the policy check by calling X509_VERIFY_PARAM_set_flags() with + the X509_V_FLAG_POLICY_CHECK flag argument.
+Certificate policy checks are disabled by default in OpenSSL and are not + commonly used by applications.
Upgrade Alpine:3.17 openssl to version 3.0.8-r2 or higher.
Upgrade Alpine:3.17 openssl to version 3.0.8-r3 or higher.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Alpine:3.17.
See How to fix? for Alpine:3.17 relevant fixed versions and status.
The function X509_VERIFY_PARAM_add0_policy() is documented to - implicitly enable the certificate policy check when doing certificate - verification. However the implementation of the function does not - enable the check which allows certificates with invalid or incorrect - policies to pass the certificate verification.
-As suddenly enabling the policy check could break existing deployments it was - decided to keep the existing behavior of the X509_VERIFY_PARAM_add0_policy() - function.
-Instead the applications that require OpenSSL to perform certificate - policy check need to use X509_VERIFY_PARAM_set1_policies() or explicitly - enable the policy check by calling X509_VERIFY_PARAM_set_flags() with - the X509_V_FLAG_POLICY_CHECK flag argument.
-Certificate policy checks are disabled by default in OpenSSL and are not - commonly used by applications.
+Issue summary: The AES-XTS cipher decryption implementation for 64 bit ARM + platform contains a bug that could cause it to read past the input buffer, + leading to a crash.
+Impact summary: Applications that use the AES-XTS algorithm on the 64 bit ARM + platform can crash in rare circumstances. The AES-XTS algorithm is usually + used for disk encryption.
+The AES-XTS cipher decryption implementation for 64 bit ARM platform will read + past the end of the ciphertext buffer if the ciphertext size is 4 mod 5 in 16 + byte blocks, e.g. 144 bytes or 1024 bytes. If the memory after the ciphertext + buffer is unmapped, this will trigger a crash which results in a denial of + service.
+If an attacker can control the size and location of the ciphertext buffer + being decrypted by an application using AES-XTS on 64 bit ARM, the + application is affected. This is fairly unlikely making this issue + a Low severity one.
Upgrade Alpine:3.17 openssl to version 3.0.8-r3 or higher.
Upgrade Alpine:3.17 openssl to version 3.0.8-r4 or higher.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Alpine:3.17.
See How to fix? for Alpine:3.17 relevant fixed versions and status.
Issue summary: The AES-XTS cipher decryption implementation for 64 bit ARM - platform contains a bug that could cause it to read past the input buffer, - leading to a crash.
-Impact summary: Applications that use the AES-XTS algorithm on the 64 bit ARM - platform can crash in rare circumstances. The AES-XTS algorithm is usually - used for disk encryption.
-The AES-XTS cipher decryption implementation for 64 bit ARM platform will read - past the end of the ciphertext buffer if the ciphertext size is 4 mod 5 in 16 - byte blocks, e.g. 144 bytes or 1024 bytes. If the memory after the ciphertext - buffer is unmapped, this will trigger a crash which results in a denial of - service.
-If an attacker can control the size and location of the ciphertext buffer - being decrypted by an application using AES-XTS on 64 bit ARM, the - application is affected. This is fairly unlikely making this issue - a Low severity one.
+Issue summary: Processing some specially crafted ASN.1 object identifiers or + data containing them may be very slow.
+Impact summary: Applications that use OBJ_obj2txt() directly, or use any of + the OpenSSL subsystems OCSP, PKCS7/SMIME, CMS, CMP/CRMF or TS with no message + size limit may experience notable to very long delays when processing those + messages, which may lead to a Denial of Service.
+An OBJECT IDENTIFIER is composed of a series of numbers - sub-identifiers - + most of which have no size limit. OBJ_obj2txt() may be used to translate + an ASN.1 OBJECT IDENTIFIER given in DER encoding form (using the OpenSSL + type ASN1_OBJECT) to its canonical numeric text form, which are the + sub-identifiers of the OBJECT IDENTIFIER in decimal form, separated by + periods.
+When one of the sub-identifiers in the OBJECT IDENTIFIER is very large + (these are sizes that are seen as absurdly large, taking up tens or hundreds + of KiBs), the translation to a decimal number in text may take a very long + time. The time complexity is O(n^2) with 'n' being the size of the + sub-identifiers in bytes (*).
+With OpenSSL 3.0, support to fetch cryptographic algorithms using names / + identifiers in string form was introduced. This includes using OBJECT + IDENTIFIERs in canonical numeric text form as identifiers for fetching + algorithms.
+Such OBJECT IDENTIFIERs may be received through the ASN.1 structure + AlgorithmIdentifier, which is commonly used in multiple protocols to specify + what cryptographic algorithm should be used to sign or verify, encrypt or + decrypt, or digest passed data.
+Applications that call OBJ_obj2txt() directly with untrusted data are + affected, with any version of OpenSSL. If the use is for the mere purpose + of display, the severity is considered low.
+In OpenSSL 3.0 and newer, this affects the subsystems OCSP, PKCS7/SMIME, + CMS, CMP/CRMF or TS. It also impacts anything that processes X.509 + certificates, including simple things like verifying its signature.
+The impact on TLS is relatively low, because all versions of OpenSSL have a + 100KiB limit on the peer's certificate chain. Additionally, this only + impacts clients, or servers that have explicitly enabled client + authentication.
+In OpenSSL 1.1.1 and 1.0.2, this only affects displaying diverse objects, + such as X.509 certificates. This is assumed to not happen in such a way + that it would cause a Denial of Service, so these versions are considered + not affected by this issue in such a way that it would be cause for concern, + and the severity is therefore considered low.
Upgrade Alpine:3.17 openssl to version 3.0.8-r4 or higher.
Upgrade Alpine:3.17 openssl to version 3.0.9-r0 or higher.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Alpine:3.17.
See How to fix? for Alpine:3.17 relevant fixed versions and status.
Issue summary: Processing some specially crafted ASN.1 object identifiers or - data containing them may be very slow.
-Impact summary: Applications that use OBJ_obj2txt() directly, or use any of - the OpenSSL subsystems OCSP, PKCS7/SMIME, CMS, CMP/CRMF or TS with no message - size limit may experience notable to very long delays when processing those - messages, which may lead to a Denial of Service.
-An OBJECT IDENTIFIER is composed of a series of numbers - sub-identifiers - - most of which have no size limit. OBJ_obj2txt() may be used to translate - an ASN.1 OBJECT IDENTIFIER given in DER encoding form (using the OpenSSL - type ASN1_OBJECT) to its canonical numeric text form, which are the - sub-identifiers of the OBJECT IDENTIFIER in decimal form, separated by - periods.
-When one of the sub-identifiers in the OBJECT IDENTIFIER is very large - (these are sizes that are seen as absurdly large, taking up tens or hundreds - of KiBs), the translation to a decimal number in text may take a very long - time. The time complexity is O(n^2) with 'n' being the size of the - sub-identifiers in bytes (*).
-With OpenSSL 3.0, support to fetch cryptographic algorithms using names / - identifiers in string form was introduced. This includes using OBJECT - IDENTIFIERs in canonical numeric text form as identifiers for fetching - algorithms.
-Such OBJECT IDENTIFIERs may be received through the ASN.1 structure - AlgorithmIdentifier, which is commonly used in multiple protocols to specify - what cryptographic algorithm should be used to sign or verify, encrypt or - decrypt, or digest passed data.
-Applications that call OBJ_obj2txt() directly with untrusted data are - affected, with any version of OpenSSL. If the use is for the mere purpose - of display, the severity is considered low.
-In OpenSSL 3.0 and newer, this affects the subsystems OCSP, PKCS7/SMIME, - CMS, CMP/CRMF or TS. It also impacts anything that processes X.509 - certificates, including simple things like verifying its signature.
-The impact on TLS is relatively low, because all versions of OpenSSL have a - 100KiB limit on the peer's certificate chain. Additionally, this only - impacts clients, or servers that have explicitly enabled client - authentication.
-In OpenSSL 1.1.1 and 1.0.2, this only affects displaying diverse objects, - such as X.509 certificates. This is assumed to not happen in such a way - that it would cause a Denial of Service, so these versions are considered - not affected by this issue in such a way that it would be cause for concern, - and the severity is therefore considered low.
+Issue summary: The AES-XTS cipher decryption implementation for 64 bit ARM + platform contains a bug that could cause it to read past the input buffer, + leading to a crash.
+Impact summary: Applications that use the AES-XTS algorithm on the 64 bit ARM + platform can crash in rare circumstances. The AES-XTS algorithm is usually + used for disk encryption.
+The AES-XTS cipher decryption implementation for 64 bit ARM platform will read + past the end of the ciphertext buffer if the ciphertext size is 4 mod 5 in 16 + byte blocks, e.g. 144 bytes or 1024 bytes. If the memory after the ciphertext + buffer is unmapped, this will trigger a crash which results in a denial of + service.
+If an attacker can control the size and location of the ciphertext buffer + being decrypted by an application using AES-XTS on 64 bit ARM, the + application is affected. This is fairly unlikely making this issue + a Low severity one.
Upgrade Alpine:3.17 openssl to version 3.0.9-r0 or higher.
Upgrade Alpine:3.17 openssl to version 3.0.8-r4 or higher.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Alpine:3.17.
See How to fix? for Alpine:3.17 relevant fixed versions and status.
Issue summary: The AES-XTS cipher decryption implementation for 64 bit ARM - platform contains a bug that could cause it to read past the input buffer, - leading to a crash.
-Impact summary: Applications that use the AES-XTS algorithm on the 64 bit ARM - platform can crash in rare circumstances. The AES-XTS algorithm is usually - used for disk encryption.
-The AES-XTS cipher decryption implementation for 64 bit ARM platform will read - past the end of the ciphertext buffer if the ciphertext size is 4 mod 5 in 16 - byte blocks, e.g. 144 bytes or 1024 bytes. If the memory after the ciphertext - buffer is unmapped, this will trigger a crash which results in a denial of - service.
-If an attacker can control the size and location of the ciphertext buffer - being decrypted by an application using AES-XTS on 64 bit ARM, the - application is affected. This is fairly unlikely making this issue - a Low severity one.
+Issue summary: Processing some specially crafted ASN.1 object identifiers or + data containing them may be very slow.
+Impact summary: Applications that use OBJ_obj2txt() directly, or use any of + the OpenSSL subsystems OCSP, PKCS7/SMIME, CMS, CMP/CRMF or TS with no message + size limit may experience notable to very long delays when processing those + messages, which may lead to a Denial of Service.
+An OBJECT IDENTIFIER is composed of a series of numbers - sub-identifiers - + most of which have no size limit. OBJ_obj2txt() may be used to translate + an ASN.1 OBJECT IDENTIFIER given in DER encoding form (using the OpenSSL + type ASN1_OBJECT) to its canonical numeric text form, which are the + sub-identifiers of the OBJECT IDENTIFIER in decimal form, separated by + periods.
+When one of the sub-identifiers in the OBJECT IDENTIFIER is very large + (these are sizes that are seen as absurdly large, taking up tens or hundreds + of KiBs), the translation to a decimal number in text may take a very long + time. The time complexity is O(n^2) with 'n' being the size of the + sub-identifiers in bytes (*).
+With OpenSSL 3.0, support to fetch cryptographic algorithms using names / + identifiers in string form was introduced. This includes using OBJECT + IDENTIFIERs in canonical numeric text form as identifiers for fetching + algorithms.
+Such OBJECT IDENTIFIERs may be received through the ASN.1 structure + AlgorithmIdentifier, which is commonly used in multiple protocols to specify + what cryptographic algorithm should be used to sign or verify, encrypt or + decrypt, or digest passed data.
+Applications that call OBJ_obj2txt() directly with untrusted data are + affected, with any version of OpenSSL. If the use is for the mere purpose + of display, the severity is considered low.
+In OpenSSL 3.0 and newer, this affects the subsystems OCSP, PKCS7/SMIME, + CMS, CMP/CRMF or TS. It also impacts anything that processes X.509 + certificates, including simple things like verifying its signature.
+The impact on TLS is relatively low, because all versions of OpenSSL have a + 100KiB limit on the peer's certificate chain. Additionally, this only + impacts clients, or servers that have explicitly enabled client + authentication.
+In OpenSSL 1.1.1 and 1.0.2, this only affects displaying diverse objects, + such as X.509 certificates. This is assumed to not happen in such a way + that it would cause a Denial of Service, so these versions are considered + not affected by this issue in such a way that it would be cause for concern, + and the severity is therefore considered low.
Upgrade Alpine:3.17 openssl to version 3.0.8-r4 or higher.
Upgrade Alpine:3.17 openssl to version 3.0.9-r0 or higher.
Note: Versions mentioned in the description apply only to the upstream perl package and not the perl package as distributed by Ubuntu:22.04.
+ See How to fix? for Ubuntu:22.04 relevant fixed versions and status.
CPAN.pm before 2.35 does not verify TLS certificates when downloading distributions over HTTPS.
+There is no fixed version for Ubuntu:22.04 perl.
Note: Versions mentioned in the description apply only to the upstream libcap2 package and not the libcap2 package as distributed by Ubuntu:22.04.
- See How to fix? for Ubuntu:22.04 relevant fixed versions and status.
A vulnerability was found in libcap. This issue occurs in the _libcap_strdup() function and can lead to an integer overflow if the input string is close to 4GiB.
+This vulnerability has not been analyzed by NVD yet.
There is no fixed version for Ubuntu:22.04 libcap2.
Note: Versions mentioned in the description apply only to the upstream libcap2 package and not the libcap2 package as distributed by Ubuntu:22.04.
- See How to fix? for Ubuntu:22.04 relevant fixed versions and status.
A vulnerability was found in the pthread_create() function in libcap. This issue may allow a malicious actor to use cause __real_pthread_create() to return an error, which can exhaust the process memory.
+This vulnerability has not been analyzed by NVD yet.
There is no fixed version for Ubuntu:22.04 libcap2.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Alpine:3.17.
See How to fix? for Alpine:3.17 relevant fixed versions and status.
Issue summary: Processing some specially crafted ASN.1 object identifiers or - data containing them may be very slow.
-Impact summary: Applications that use OBJ_obj2txt() directly, or use any of - the OpenSSL subsystems OCSP, PKCS7/SMIME, CMS, CMP/CRMF or TS with no message - size limit may experience notable to very long delays when processing those - messages, which may lead to a Denial of Service.
-An OBJECT IDENTIFIER is composed of a series of numbers - sub-identifiers - - most of which have no size limit. OBJ_obj2txt() may be used to translate - an ASN.1 OBJECT IDENTIFIER given in DER encoding form (using the OpenSSL - type ASN1_OBJECT) to its canonical numeric text form, which are the - sub-identifiers of the OBJECT IDENTIFIER in decimal form, separated by - periods.
-When one of the sub-identifiers in the OBJECT IDENTIFIER is very large - (these are sizes that are seen as absurdly large, taking up tens or hundreds - of KiBs), the translation to a decimal number in text may take a very long - time. The time complexity is O(n^2) with 'n' being the size of the - sub-identifiers in bytes (*).
-With OpenSSL 3.0, support to fetch cryptographic algorithms using names / - identifiers in string form was introduced. This includes using OBJECT - IDENTIFIERs in canonical numeric text form as identifiers for fetching - algorithms.
-Such OBJECT IDENTIFIERs may be received through the ASN.1 structure - AlgorithmIdentifier, which is commonly used in multiple protocols to specify - what cryptographic algorithm should be used to sign or verify, encrypt or - decrypt, or digest passed data.
-Applications that call OBJ_obj2txt() directly with untrusted data are - affected, with any version of OpenSSL. If the use is for the mere purpose - of display, the severity is considered low.
-In OpenSSL 3.0 and newer, this affects the subsystems OCSP, PKCS7/SMIME, - CMS, CMP/CRMF or TS. It also impacts anything that processes X.509 - certificates, including simple things like verifying its signature.
-The impact on TLS is relatively low, because all versions of OpenSSL have a - 100KiB limit on the peer's certificate chain. Additionally, this only - impacts clients, or servers that have explicitly enabled client - authentication.
-In OpenSSL 1.1.1 and 1.0.2, this only affects displaying diverse objects, - such as X.509 certificates. This is assumed to not happen in such a way - that it would cause a Denial of Service, so these versions are considered - not affected by this issue in such a way that it would be cause for concern, - and the severity is therefore considered low.
+Applications that use a non-default option when verifying certificates may be + vulnerable to an attack from a malicious CA to circumvent certain checks.
+Invalid certificate policies in leaf certificates are silently ignored by + OpenSSL and other certificate policy checks are skipped for that certificate. + A malicious CA could use this to deliberately assert invalid certificate policies + in order to circumvent policy checking on the certificate altogether.
+Policy processing is disabled by default but can be enabled by passing
+ the -policy' argument to the command line utilities or by calling the X509_VERIFY_PARAM_set1_policies()' function.
Upgrade Alpine:3.17 openssl to version 3.0.9-r0 or higher.
Upgrade Alpine:3.17 openssl to version 3.0.8-r2 or higher.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Alpine:3.17.
See How to fix? for Alpine:3.17 relevant fixed versions and status.
Applications that use a non-default option when verifying certificates may be - vulnerable to an attack from a malicious CA to circumvent certain checks.
-Invalid certificate policies in leaf certificates are silently ignored by - OpenSSL and other certificate policy checks are skipped for that certificate. - A malicious CA could use this to deliberately assert invalid certificate policies - in order to circumvent policy checking on the certificate altogether.
-Policy processing is disabled by default but can be enabled by passing
- the -policy' argument to the command line utilities or by calling the X509_VERIFY_PARAM_set1_policies()' function.
The function X509_VERIFY_PARAM_add0_policy() is documented to + implicitly enable the certificate policy check when doing certificate + verification. However the implementation of the function does not + enable the check which allows certificates with invalid or incorrect + policies to pass the certificate verification.
+As suddenly enabling the policy check could break existing deployments it was + decided to keep the existing behavior of the X509_VERIFY_PARAM_add0_policy() + function.
+Instead the applications that require OpenSSL to perform certificate + policy check need to use X509_VERIFY_PARAM_set1_policies() or explicitly + enable the policy check by calling X509_VERIFY_PARAM_set_flags() with + the X509_V_FLAG_POLICY_CHECK flag argument.
+Certificate policy checks are disabled by default in OpenSSL and are not + commonly used by applications.
Upgrade Alpine:3.17 openssl to version 3.0.8-r2 or higher.
Upgrade Alpine:3.17 openssl to version 3.0.8-r3 or higher.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Alpine:3.17.
See How to fix? for Alpine:3.17 relevant fixed versions and status.
The function X509_VERIFY_PARAM_add0_policy() is documented to - implicitly enable the certificate policy check when doing certificate - verification. However the implementation of the function does not - enable the check which allows certificates with invalid or incorrect - policies to pass the certificate verification.
-As suddenly enabling the policy check could break existing deployments it was - decided to keep the existing behavior of the X509_VERIFY_PARAM_add0_policy() - function.
-Instead the applications that require OpenSSL to perform certificate - policy check need to use X509_VERIFY_PARAM_set1_policies() or explicitly - enable the policy check by calling X509_VERIFY_PARAM_set_flags() with - the X509_V_FLAG_POLICY_CHECK flag argument.
-Certificate policy checks are disabled by default in OpenSSL and are not - commonly used by applications.
+Issue summary: The AES-XTS cipher decryption implementation for 64 bit ARM + platform contains a bug that could cause it to read past the input buffer, + leading to a crash.
+Impact summary: Applications that use the AES-XTS algorithm on the 64 bit ARM + platform can crash in rare circumstances. The AES-XTS algorithm is usually + used for disk encryption.
+The AES-XTS cipher decryption implementation for 64 bit ARM platform will read + past the end of the ciphertext buffer if the ciphertext size is 4 mod 5 in 16 + byte blocks, e.g. 144 bytes or 1024 bytes. If the memory after the ciphertext + buffer is unmapped, this will trigger a crash which results in a denial of + service.
+If an attacker can control the size and location of the ciphertext buffer + being decrypted by an application using AES-XTS on 64 bit ARM, the + application is affected. This is fairly unlikely making this issue + a Low severity one.
Upgrade Alpine:3.17 openssl to version 3.0.8-r3 or higher.
Upgrade Alpine:3.17 openssl to version 3.0.8-r4 or higher.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Alpine:3.17.
See How to fix? for Alpine:3.17 relevant fixed versions and status.
Issue summary: The AES-XTS cipher decryption implementation for 64 bit ARM - platform contains a bug that could cause it to read past the input buffer, - leading to a crash.
-Impact summary: Applications that use the AES-XTS algorithm on the 64 bit ARM - platform can crash in rare circumstances. The AES-XTS algorithm is usually - used for disk encryption.
-The AES-XTS cipher decryption implementation for 64 bit ARM platform will read - past the end of the ciphertext buffer if the ciphertext size is 4 mod 5 in 16 - byte blocks, e.g. 144 bytes or 1024 bytes. If the memory after the ciphertext - buffer is unmapped, this will trigger a crash which results in a denial of - service.
-If an attacker can control the size and location of the ciphertext buffer - being decrypted by an application using AES-XTS on 64 bit ARM, the - application is affected. This is fairly unlikely making this issue - a Low severity one.
+Issue summary: Processing some specially crafted ASN.1 object identifiers or + data containing them may be very slow.
+Impact summary: Applications that use OBJ_obj2txt() directly, or use any of + the OpenSSL subsystems OCSP, PKCS7/SMIME, CMS, CMP/CRMF or TS with no message + size limit may experience notable to very long delays when processing those + messages, which may lead to a Denial of Service.
+An OBJECT IDENTIFIER is composed of a series of numbers - sub-identifiers - + most of which have no size limit. OBJ_obj2txt() may be used to translate + an ASN.1 OBJECT IDENTIFIER given in DER encoding form (using the OpenSSL + type ASN1_OBJECT) to its canonical numeric text form, which are the + sub-identifiers of the OBJECT IDENTIFIER in decimal form, separated by + periods.
+When one of the sub-identifiers in the OBJECT IDENTIFIER is very large + (these are sizes that are seen as absurdly large, taking up tens or hundreds + of KiBs), the translation to a decimal number in text may take a very long + time. The time complexity is O(n^2) with 'n' being the size of the + sub-identifiers in bytes (*).
+With OpenSSL 3.0, support to fetch cryptographic algorithms using names / + identifiers in string form was introduced. This includes using OBJECT + IDENTIFIERs in canonical numeric text form as identifiers for fetching + algorithms.
+Such OBJECT IDENTIFIERs may be received through the ASN.1 structure + AlgorithmIdentifier, which is commonly used in multiple protocols to specify + what cryptographic algorithm should be used to sign or verify, encrypt or + decrypt, or digest passed data.
+Applications that call OBJ_obj2txt() directly with untrusted data are + affected, with any version of OpenSSL. If the use is for the mere purpose + of display, the severity is considered low.
+In OpenSSL 3.0 and newer, this affects the subsystems OCSP, PKCS7/SMIME, + CMS, CMP/CRMF or TS. It also impacts anything that processes X.509 + certificates, including simple things like verifying its signature.
+The impact on TLS is relatively low, because all versions of OpenSSL have a + 100KiB limit on the peer's certificate chain. Additionally, this only + impacts clients, or servers that have explicitly enabled client + authentication.
+In OpenSSL 1.1.1 and 1.0.2, this only affects displaying diverse objects, + such as X.509 certificates. This is assumed to not happen in such a way + that it would cause a Denial of Service, so these versions are considered + not affected by this issue in such a way that it would be cause for concern, + and the severity is therefore considered low.
Upgrade Alpine:3.17 openssl to version 3.0.8-r4 or higher.
Upgrade Alpine:3.17 openssl to version 3.0.9-r0 or higher.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Alpine:3.17.
See How to fix? for Alpine:3.17 relevant fixed versions and status.
Issue summary: Processing some specially crafted ASN.1 object identifiers or - data containing them may be very slow.
-Impact summary: Applications that use OBJ_obj2txt() directly, or use any of - the OpenSSL subsystems OCSP, PKCS7/SMIME, CMS, CMP/CRMF or TS with no message - size limit may experience notable to very long delays when processing those - messages, which may lead to a Denial of Service.
-An OBJECT IDENTIFIER is composed of a series of numbers - sub-identifiers - - most of which have no size limit. OBJ_obj2txt() may be used to translate - an ASN.1 OBJECT IDENTIFIER given in DER encoding form (using the OpenSSL - type ASN1_OBJECT) to its canonical numeric text form, which are the - sub-identifiers of the OBJECT IDENTIFIER in decimal form, separated by - periods.
-When one of the sub-identifiers in the OBJECT IDENTIFIER is very large - (these are sizes that are seen as absurdly large, taking up tens or hundreds - of KiBs), the translation to a decimal number in text may take a very long - time. The time complexity is O(n^2) with 'n' being the size of the - sub-identifiers in bytes (*).
-With OpenSSL 3.0, support to fetch cryptographic algorithms using names / - identifiers in string form was introduced. This includes using OBJECT - IDENTIFIERs in canonical numeric text form as identifiers for fetching - algorithms.
-Such OBJECT IDENTIFIERs may be received through the ASN.1 structure - AlgorithmIdentifier, which is commonly used in multiple protocols to specify - what cryptographic algorithm should be used to sign or verify, encrypt or - decrypt, or digest passed data.
-Applications that call OBJ_obj2txt() directly with untrusted data are - affected, with any version of OpenSSL. If the use is for the mere purpose - of display, the severity is considered low.
-In OpenSSL 3.0 and newer, this affects the subsystems OCSP, PKCS7/SMIME, - CMS, CMP/CRMF or TS. It also impacts anything that processes X.509 - certificates, including simple things like verifying its signature.
-The impact on TLS is relatively low, because all versions of OpenSSL have a - 100KiB limit on the peer's certificate chain. Additionally, this only - impacts clients, or servers that have explicitly enabled client - authentication.
-In OpenSSL 1.1.1 and 1.0.2, this only affects displaying diverse objects, - such as X.509 certificates. This is assumed to not happen in such a way - that it would cause a Denial of Service, so these versions are considered - not affected by this issue in such a way that it would be cause for concern, - and the severity is therefore considered low.
+Issue summary: The AES-XTS cipher decryption implementation for 64 bit ARM + platform contains a bug that could cause it to read past the input buffer, + leading to a crash.
+Impact summary: Applications that use the AES-XTS algorithm on the 64 bit ARM + platform can crash in rare circumstances. The AES-XTS algorithm is usually + used for disk encryption.
+The AES-XTS cipher decryption implementation for 64 bit ARM platform will read + past the end of the ciphertext buffer if the ciphertext size is 4 mod 5 in 16 + byte blocks, e.g. 144 bytes or 1024 bytes. If the memory after the ciphertext + buffer is unmapped, this will trigger a crash which results in a denial of + service.
+If an attacker can control the size and location of the ciphertext buffer + being decrypted by an application using AES-XTS on 64 bit ARM, the + application is affected. This is fairly unlikely making this issue + a Low severity one.
Upgrade Alpine:3.17 openssl to version 3.0.9-r0 or higher.
Upgrade Alpine:3.17 openssl to version 3.0.8-r4 or higher.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Alpine:3.17.
See How to fix? for Alpine:3.17 relevant fixed versions and status.
Issue summary: The AES-XTS cipher decryption implementation for 64 bit ARM - platform contains a bug that could cause it to read past the input buffer, - leading to a crash.
-Impact summary: Applications that use the AES-XTS algorithm on the 64 bit ARM - platform can crash in rare circumstances. The AES-XTS algorithm is usually - used for disk encryption.
-The AES-XTS cipher decryption implementation for 64 bit ARM platform will read - past the end of the ciphertext buffer if the ciphertext size is 4 mod 5 in 16 - byte blocks, e.g. 144 bytes or 1024 bytes. If the memory after the ciphertext - buffer is unmapped, this will trigger a crash which results in a denial of - service.
-If an attacker can control the size and location of the ciphertext buffer - being decrypted by an application using AES-XTS on 64 bit ARM, the - application is affected. This is fairly unlikely making this issue - a Low severity one.
+Issue summary: Processing some specially crafted ASN.1 object identifiers or + data containing them may be very slow.
+Impact summary: Applications that use OBJ_obj2txt() directly, or use any of + the OpenSSL subsystems OCSP, PKCS7/SMIME, CMS, CMP/CRMF or TS with no message + size limit may experience notable to very long delays when processing those + messages, which may lead to a Denial of Service.
+An OBJECT IDENTIFIER is composed of a series of numbers - sub-identifiers - + most of which have no size limit. OBJ_obj2txt() may be used to translate + an ASN.1 OBJECT IDENTIFIER given in DER encoding form (using the OpenSSL + type ASN1_OBJECT) to its canonical numeric text form, which are the + sub-identifiers of the OBJECT IDENTIFIER in decimal form, separated by + periods.
+When one of the sub-identifiers in the OBJECT IDENTIFIER is very large + (these are sizes that are seen as absurdly large, taking up tens or hundreds + of KiBs), the translation to a decimal number in text may take a very long + time. The time complexity is O(n^2) with 'n' being the size of the + sub-identifiers in bytes (*).
+With OpenSSL 3.0, support to fetch cryptographic algorithms using names / + identifiers in string form was introduced. This includes using OBJECT + IDENTIFIERs in canonical numeric text form as identifiers for fetching + algorithms.
+Such OBJECT IDENTIFIERs may be received through the ASN.1 structure + AlgorithmIdentifier, which is commonly used in multiple protocols to specify + what cryptographic algorithm should be used to sign or verify, encrypt or + decrypt, or digest passed data.
+Applications that call OBJ_obj2txt() directly with untrusted data are + affected, with any version of OpenSSL. If the use is for the mere purpose + of display, the severity is considered low.
+In OpenSSL 3.0 and newer, this affects the subsystems OCSP, PKCS7/SMIME, + CMS, CMP/CRMF or TS. It also impacts anything that processes X.509 + certificates, including simple things like verifying its signature.
+The impact on TLS is relatively low, because all versions of OpenSSL have a + 100KiB limit on the peer's certificate chain. Additionally, this only + impacts clients, or servers that have explicitly enabled client + authentication.
+In OpenSSL 1.1.1 and 1.0.2, this only affects displaying diverse objects, + such as X.509 certificates. This is assumed to not happen in such a way + that it would cause a Denial of Service, so these versions are considered + not affected by this issue in such a way that it would be cause for concern, + and the severity is therefore considered low.
Upgrade Alpine:3.17 openssl to version 3.0.8-r4 or higher.
Upgrade Alpine:3.17 openssl to version 3.0.9-r0 or higher.
gopkg.in/yaml.v3 is a YAML support package for the Go language.
+Affected versions of this package are vulnerable to Denial of Service (DoS) via the Unmarshal function, which causes the program to crash when attempting to deserialize invalid input.
package main
+
+ import (
+ "gopkg.in/yaml.v3"
+ )
+
+ func main() {
+ var t interface{}
+ yaml.Unmarshal([]byte("0: [:!00 \xef"), &t)
+ }
+
+ Denial of Service (DoS) describes a family of attacks, all aimed at making a system inaccessible to its intended and legitimate users.
+Unlike other vulnerabilities, DoS attacks usually do not aim at breaching security. Rather, they are focused on making websites and services unavailable to genuine users resulting in downtime.
+One popular Denial of Service vulnerability is DDoS (a Distributed Denial of Service), an attack that attempts to clog network pipes to the system by generating a large volume of traffic from many machines.
+When it comes to open source libraries, DoS vulnerabilities allow attackers to trigger such a crash or crippling of the service by using a flaw either in the application code or from the use of open source libraries.
+Two common types of DoS vulnerabilities:
+High CPU/Memory Consumption- An attacker sending crafted requests that could cause the system to take a disproportionate amount of time to process. For example, commons-fileupload:commons-fileupload.
+Crash - An attacker sending crafted requests that could cause the system to crash. For Example, npm ws package
Upgrade gopkg.in/yaml.v3 to version 3.0.0 or higher.
gopkg.in/yaml.v3 is a YAML support package for the Go language.
+Affected versions of this package are vulnerable to NULL Pointer Dereference when parsing #\n-\n-\n0 via the parserc.go parser.
package main
+
+ import (
+ "gopkg.in/yaml.v3"
+ )
+
+ func main() {
+ var t interface{}
+ yaml.Unmarshal([]byte("#\n-\n-\n0"), &t)
+ }
+
+ Upgrade gopkg.in/yaml.v3 to version 3.0.1 or higher.
Affected versions of this package are vulnerable to Denial of Service (DoS) such that a maliciously crafted HTTP/2 stream could cause excessive CPU consumption in the HPACK decoder.
Denial of Service (DoS) describes a family of attacks, all aimed at making a system inaccessible to its intended and legitimate users.
+Unlike other vulnerabilities, DoS attacks usually do not aim at breaching security. Rather, they are focused on making websites and services unavailable to genuine users resulting in downtime.
+One popular Denial of Service vulnerability is DDoS (a Distributed Denial of Service), an attack that attempts to clog network pipes to the system by generating a large volume of traffic from many machines.
+When it comes to open source libraries, DoS vulnerabilities allow attackers to trigger such a crash or crippling of the service by using a flaw either in the application code or from the use of open source libraries.
+Two common types of DoS vulnerabilities:
+High CPU/Memory Consumption- An attacker sending crafted requests that could cause the system to take a disproportionate amount of time to process. For example, commons-fileupload:commons-fileupload.
+Crash - An attacker sending crafted requests that could cause the system to crash. For Example, npm ws package
Upgrade golang.org/x/net/http2/hpack to version 0.7.0 or higher.
golang.org/x/net/http2 is a work-in-progress HTTP/2 implementation for Go.
+Affected versions of this package are vulnerable to Denial of Service as an HTTP/2 connection can hang during closing if a shutdown was preempted by a fatal error.
Upgrade golang.org/x/net/http2 to version 0.0.0-20220906165146-f3363e06e74c, 1.18.6, 1.19.1 or higher.
golang.org/x/net/http2 is a work-in-progress HTTP/2 implementation for Go.
+Affected versions of this package are vulnerable to Denial of Service (DoS) such that a maliciously crafted HTTP/2 stream could cause excessive CPU consumption in the HPACK decoder.
Denial of Service (DoS) describes a family of attacks, all aimed at making a system inaccessible to its intended and legitimate users.
+Unlike other vulnerabilities, DoS attacks usually do not aim at breaching security. Rather, they are focused on making websites and services unavailable to genuine users resulting in downtime.
+One popular Denial of Service vulnerability is DDoS (a Distributed Denial of Service), an attack that attempts to clog network pipes to the system by generating a large volume of traffic from many machines.
+When it comes to open source libraries, DoS vulnerabilities allow attackers to trigger such a crash or crippling of the service by using a flaw either in the application code or from the use of open source libraries.
+Two common types of DoS vulnerabilities:
+High CPU/Memory Consumption- An attacker sending crafted requests that could cause the system to take a disproportionate amount of time to process. For example, commons-fileupload:commons-fileupload.
+Crash - An attacker sending crafted requests that could cause the system to crash. For Example, npm ws package
Upgrade golang.org/x/net/http2 to version 0.7.0 or higher.
Note: Versions mentioned in the description apply only to the upstream perl package and not the perl package as distributed by Ubuntu:22.04.
+ See How to fix? for Ubuntu:22.04 relevant fixed versions and status.
CPAN.pm before 2.35 does not verify TLS certificates when downloading distributions over HTTPS.
+There is no fixed version for Ubuntu:22.04 perl.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Ubuntu:22.04.
+ See How to fix? for Ubuntu:22.04 relevant fixed versions and status.
Issue summary: Processing some specially crafted ASN.1 object identifiers or + data containing them may be very slow.
+Impact summary: Applications that use OBJ_obj2txt() directly, or use any of + the OpenSSL subsystems OCSP, PKCS7/SMIME, CMS, CMP/CRMF or TS with no message + size limit may experience notable to very long delays when processing those + messages, which may lead to a Denial of Service.
+An OBJECT IDENTIFIER is composed of a series of numbers - sub-identifiers - + most of which have no size limit. OBJ_obj2txt() may be used to translate + an ASN.1 OBJECT IDENTIFIER given in DER encoding form (using the OpenSSL + type ASN1_OBJECT) to its canonical numeric text form, which are the + sub-identifiers of the OBJECT IDENTIFIER in decimal form, separated by + periods.
+When one of the sub-identifiers in the OBJECT IDENTIFIER is very large + (these are sizes that are seen as absurdly large, taking up tens or hundreds + of KiBs), the translation to a decimal number in text may take a very long + time. The time complexity is O(n^2) with 'n' being the size of the + sub-identifiers in bytes (*).
+With OpenSSL 3.0, support to fetch cryptographic algorithms using names / + identifiers in string form was introduced. This includes using OBJECT + IDENTIFIERs in canonical numeric text form as identifiers for fetching + algorithms.
+Such OBJECT IDENTIFIERs may be received through the ASN.1 structure + AlgorithmIdentifier, which is commonly used in multiple protocols to specify + what cryptographic algorithm should be used to sign or verify, encrypt or + decrypt, or digest passed data.
+Applications that call OBJ_obj2txt() directly with untrusted data are + affected, with any version of OpenSSL. If the use is for the mere purpose + of display, the severity is considered low.
+In OpenSSL 3.0 and newer, this affects the subsystems OCSP, PKCS7/SMIME, + CMS, CMP/CRMF or TS. It also impacts anything that processes X.509 + certificates, including simple things like verifying its signature.
+The impact on TLS is relatively low, because all versions of OpenSSL have a + 100KiB limit on the peer's certificate chain. Additionally, this only + impacts clients, or servers that have explicitly enabled client + authentication.
+In OpenSSL 1.1.1 and 1.0.2, this only affects displaying diverse objects, + such as X.509 certificates. This is assumed to not happen in such a way + that it would cause a Denial of Service, so these versions are considered + not affected by this issue in such a way that it would be cause for concern, + and the severity is therefore considered low.
+Upgrade Ubuntu:22.04 openssl to version 3.0.2-0ubuntu1.10 or higher.
This vulnerability has not been analyzed by NVD yet.
+There is no fixed version for Ubuntu:22.04 libcap2.
gopkg.in/yaml.v2 is a YAML support package for the Go language.
+Affected versions of this package are vulnerable to Denial of Service (DoS). It is possible for authorized users to send malicious YAML payloads to cause kube-apiserver to consume excessive CPU cycles while parsing YAML.
+Denial of Service (DoS) describes a family of attacks, all aimed at making a system inaccessible to its intended and legitimate users.
+Unlike other vulnerabilities, DoS attacks usually do not aim at breaching security. Rather, they are focused on making websites and services unavailable to genuine users resulting in downtime.
+One popular Denial of Service vulnerability is DDoS (a Distributed Denial of Service), an attack that attempts to clog network pipes to the system by generating a large volume of traffic from many machines.
+When it comes to open source libraries, DoS vulnerabilities allow attackers to trigger such a crash or crippling of the service by using a flaw either in the application code or from the use of open source libraries.
+Two common types of DoS vulnerabilities:
+High CPU/Memory Consumption- An attacker sending crafted requests that could cause the system to take a disproportionate amount of time to process. For example, commons-fileupload:commons-fileupload.
+Crash - An attacker sending crafted requests that could cause the system to crash. For Example, npm ws package
Upgrade gopkg.in/yaml.v2 to version 2.2.8 or higher.
Affected versions of this package are vulnerable to Improper Input Validation due to the parser being, by design, exposed to untrusted user input, which can be leveraged to force a program to consume significant time parsing Accept-Language headers.
Upgrade golang.org/x/text/language to version 0.3.8 or higher.
Affected versions of this package are vulnerable to Denial of Service (DoS) such that a maliciously crafted HTTP/2 stream could cause excessive CPU consumption in the HPACK decoder.
Denial of Service (DoS) describes a family of attacks, all aimed at making a system inaccessible to its intended and legitimate users.
-Unlike other vulnerabilities, DoS attacks usually do not aim at breaching security. Rather, they are focused on making websites and services unavailable to genuine users resulting in downtime.
-One popular Denial of Service vulnerability is DDoS (a Distributed Denial of Service), an attack that attempts to clog network pipes to the system by generating a large volume of traffic from many machines.
-When it comes to open source libraries, DoS vulnerabilities allow attackers to trigger such a crash or crippling of the service by using a flaw either in the application code or from the use of open source libraries.
-Two common types of DoS vulnerabilities:
-High CPU/Memory Consumption- An attacker sending crafted requests that could cause the system to take a disproportionate amount of time to process. For example, commons-fileupload:commons-fileupload.
-Crash - An attacker sending crafted requests that could cause the system to crash. For Example, npm ws package
Affected versions of this package are vulnerable to Incorrect Privilege Assignment such that when called with a non-zero flags parameter, the Faccessat function can incorrectly report that a file is accessible.
Upgrade golang.org/x/net/http2/hpack to version 0.7.0 or higher.
Upgrade golang.org/x/sys/unix to version 0.1.0 or higher.
golang.org/x/net/http2 is a work-in-progress HTTP/2 implementation for Go.
-Affected versions of this package are vulnerable to Denial of Service (DoS) such that a maliciously crafted HTTP/2 stream could cause excessive CPU consumption in the HPACK decoder.
Affected versions of this package are vulnerable to Denial of Service (DoS) due to improper checks and limitations for the number of entries in the cache, which can allow an attacker to consume unbounded amounts of memory by sending a small number of very large keys.
Denial of Service (DoS) describes a family of attacks, all aimed at making a system inaccessible to its intended and legitimate users.
Unlike other vulnerabilities, DoS attacks usually do not aim at breaching security. Rather, they are focused on making websites and services unavailable to genuine users resulting in downtime.
@@ -619,24 +1586,24 @@
Upgrade golang.org/x/net/http2 to version 0.7.0 or higher.
Upgrade golang.org/x/net/http2 to version 0.4.0 or higher.
Note: Versions mentioned in the description apply only to the upstream openssl package and not the openssl package as distributed by Ubuntu:22.04.
+ See How to fix? for Ubuntu:22.04 relevant fixed versions and status.
Issue summary: The AES-XTS cipher decryption implementation for 64 bit ARM + platform contains a bug that could cause it to read past the input buffer, + leading to a crash.
+Impact summary: Applications that use the AES-XTS algorithm on the 64 bit ARM + platform can crash in rare circumstances. The AES-XTS algorithm is usually + used for disk encryption.
+The AES-XTS cipher decryption implementation for 64 bit ARM platform will read + past the end of the ciphertext buffer if the ciphertext size is 4 mod 5 in 16 + byte blocks, e.g. 144 bytes or 1024 bytes. If the memory after the ciphertext + buffer is unmapped, this will trigger a crash which results in a denial of + service.
+If an attacker can control the size and location of the ciphertext buffer + being decrypted by an application using AES-XTS on 64 bit ARM, the + application is affected. This is fairly unlikely making this issue + a Low severity one.
+Upgrade Ubuntu:22.04 openssl to version 3.0.2-0ubuntu1.10 or higher.