[PT FE] Add aten::logcumsumexp #28538

Open · wants to merge 1 commit into base: master
40 changes: 40 additions & 0 deletions src/frontends/pytorch/src/op/logcumsumexp.cpp
@@ -0,0 +1,40 @@
// Copyright (C) 2018-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/frontend/pytorch/node_context.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/cum_sum.hpp"
#include "openvino/op/exp.hpp"
#include "openvino/op/log.hpp"
#include "utils.hpp"

namespace ov {
namespace frontend {
namespace pytorch {
namespace op {

using namespace ov::op;

OutputVector translate_logcumsumexp(const NodeContext& context) {
// aten::logcumsumexp(Tensor self, int dim) -> Tensor
num_inputs_check(context, 2, 2);
auto input = context.get_input(0);
auto dim = context.get_input(1);

// First compute exp(input)
auto exp = context.mark_node(std::make_shared<v0::Exp>(input));

// Then compute cumsum of the exponentials
auto cumsum = context.mark_node(std::make_shared<v0::CumSum>(exp, dim));

// Finally take log of the result
auto log = context.mark_node(std::make_shared<v0::Log>(cumsum));
Comment on lines +25 to +32 (Member):
we may have exponent explosion for large input elements when computing exp(input_el). To avoid this, please do the following (a sketch of the rewrite follows the file diff below):

  1. compute the max elements along the required dim: max = ReduceMax(input, dim)
  2. ln(cumsum(exp(input), dim)) = ln(exp(max) * cumsum(exp(input - max), dim)) = max + ln(cumsum(exp(input - max), dim))


return {log};
}

} // namespace op
} // namespace pytorch
} // namespace frontend
} // namespace ov
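
For reference, here is a minimal sketch of the stabilized translator the comment asks for. It is untested, and the helper name translate_logcumsumexp_stable and the exact opset versions used are assumptions, not code from this PR. In float32, exp overflows for inputs above roughly 88.7, so the naive exp -> cumsum -> log chain above returns inf for large elements; subtracting the per-slice maximum first keeps every exponent at or below exp(0) = 1.

// Sketch only: stabilized variant per the review comment above. Assumes the
// same file context as logcumsumexp.cpp (using namespace ov::op;) plus:
//   #include "openvino/op/add.hpp"
//   #include "openvino/op/reduce_max.hpp"
//   #include "openvino/op/subtract.hpp"
OutputVector translate_logcumsumexp_stable(const NodeContext& context) {
    // aten::logcumsumexp(Tensor self, int dim) -> Tensor
    num_inputs_check(context, 2, 2);
    auto input = context.get_input(0);
    auto dim = context.get_input(1);

    // 1. max = ReduceMax(input, dim); keep_dims=true so it broadcasts against input
    auto max = context.mark_node(std::make_shared<v1::ReduceMax>(input, dim, true));

    // 2. Shift the inputs so the largest exponent is exp(0) = 1 (cannot overflow),
    //    then apply the naive exp -> cumsum -> log chain to the shifted values
    auto shifted = context.mark_node(std::make_shared<v1::Subtract>(input, max));
    auto exp = context.mark_node(std::make_shared<v0::Exp>(shifted));
    auto cumsum = context.mark_node(std::make_shared<v0::CumSum>(exp, dim));
    auto log = context.mark_node(std::make_shared<v0::Log>(cumsum));

    // 3. Undo the shift: max + ln(cumsum(exp(input - max), dim))
    return {context.mark_node(std::make_shared<v1::Add>(log, max))};
}

Because max is reduced over dim with keep_dims, it is constant along the cumsum axis, so factoring exp(max) out of the cumulative sum is exact and the two variants agree up to floating-point rounding.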
2 changes: 2 additions & 0 deletions src/frontends/pytorch/src/op_table.cpp
@@ -139,6 +139,7 @@ OP_CONVERTER(translate_log_softmax);
OP_CONVERTER(translate_log2);
OP_CONVERTER(translate_log10);
OP_CONVERTER(translate_logsumexp);
OP_CONVERTER(translate_logcumsumexp);
OP_CONVERTER(translate_loop);
OP_CONVERTER(translate_lstm);
OP_CONVERTER(translate_masked_fill);
@@ -744,6 +745,7 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() {
{"torchvision::deform_conv2d", op::translate_deform_conv},
{"torchvision::nms", op::translate_nms},
{"torchvision::roi_align", op::translate_roi_align},
{"aten::logcumsumexp", op::translate_logcumsumexp},
};
};

31 changes: 31 additions & 0 deletions tests/layer_tests/pytorch_tests/test_logcumsumexp.py
@@ -0,0 +1,31 @@
# Copyright (C) 2018-2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest
import numpy as np
import torch
from pytorch_layer_test_class import PytorchLayerTest


class TestLogCumSumExp(PytorchLayerTest):
def _prepare_input(self):
return (np.random.randn(1, 3, 224, 224).astype(np.float32),)

def create_model(self, axis):
class aten_logcumsumexp(torch.nn.Module):
def __init__(self, axis):
super(aten_logcumsumexp, self).__init__()
self.axis = axis

def forward(self, x):
return torch.logcumsumexp(x, self.axis)

ref_net = None

return aten_logcumsumexp(axis), ref_net, "aten::logcumsumexp"

@pytest.mark.parametrize("axis", [0, 1, 2, 3, -1, -2, -3, -4])
@pytest.mark.nightly
@pytest.mark.precommit
Comment (Member):

please also enable this operation for torch.export: the test needs the @pytest.mark.precommit_torch_export marker (a sketch follows the file diff below).

def test_logcumsumexp(self, axis, ie_device, precision, ir_version):
self._test(*self.create_model(axis), ie_device, precision, ir_version)
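
Applying the request above would be a one-line change to the decorator stack (a sketch, not part of this diff):

@pytest.mark.parametrize("axis", [0, 1, 2, 3, -1, -2, -3, -4])
@pytest.mark.nightly
@pytest.mark.precommit
@pytest.mark.precommit_torch_export  # enables the test for the torch.export path, as requested
def test_logcumsumexp(self, axis, ie_device, precision, ir_version):
    self._test(*self.create_model(axis), ie_device, precision, ir_version)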