Change default option for exponential integration (#495)
Signed-off-by: Towaki Takikawa <[email protected]>
tovacinni authored Dec 15, 2021
1 parent 8fe43a0 commit dd98c85
Showing 2 changed files with 3 additions and 3 deletions.
4 changes: 2 additions & 2 deletions kaolin/render/spc/raytrace.py
@@ -262,7 +262,7 @@ def cumprod(feats, boundaries, exclusive=False, reverse=False):
     """
     return Cumprod.apply(feats.contiguous(), boundaries.contiguous(), exclusive, reverse)
 
-def exponential_integration(feats, tau, boundaries, exclusive=False):
+def exponential_integration(feats, tau, boundaries, exclusive=True):
     r"""Exponential transmittance integration across packs using the optical thickness (tau).
 
     Exponential transmittance is derived from the Beer-Lambert law. Typical implementations of
@@ -278,7 +278,7 @@ def exponential_integration(feats, tau, boundaries, exclusive=False):
         boundaries (torch.BoolTensor): bools of shape :math:`(\text{num_rays})`.
             Given some index array marking the pack IDs, the boundaries can be calculated with
             :func:`mark_pack_boundaries`.
-        exclusive (bool): Compute exclusive exponential integration if true.
+        exclusive (bool): Compute exclusive exponential integration if true. (default: True)
 
     Returns:
         (torch.FloatTensor, torch.FloatTensor)
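For context on what the flipped default changes, here is a minimal single-pack sketch of exponential integration under the Beer-Lambert formulation the docstring cites. This is an illustrative reference, not kaolin's packed CUDA implementation; the helper name `exponential_integration_ref` is hypothetical.

```python
import torch

def exponential_integration_ref(feats, tau, exclusive=True):
    # feats: (num_samples, num_feats); tau: (num_samples, 1) optical thickness.
    csum = torch.cumsum(tau, dim=0)
    if exclusive:
        # Exclusive: sample i is attenuated only by tau_0 .. tau_{i-1},
        # so the first sample gets full transmittance. This is the new
        # default after this commit.
        csum = csum - tau
    transmittance = torch.exp(-csum)      # T_i = exp(-sum of preceding tau)
    alpha = 1.0 - torch.exp(-tau)         # per-sample opacity
    weights = transmittance * alpha       # volume-rendering compositing weights
    integrated_feats = (weights * feats).sum(dim=0)
    return integrated_feats, weights
```

With `exclusive=False`, the current sample's own tau is included in the attenuation, which shrinks every weight; that is why the test below must now pin the flag to keep its expected values.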
2 changes: 1 addition & 1 deletion tests/python/kaolin/render/spc/test_rayops.py
@@ -199,7 +199,7 @@ def test_cumprod_exclusive_reverse(self, feats, boundaries):
         assert torch.equal(cumprod, expected)
 
     def test_exponential_integration(self, feats, tau, boundaries):
-        integrated_feats, transmittance = spc_render.exponential_integration(feats, tau, boundaries)
+        integrated_feats, transmittance = spc_render.exponential_integration(feats, tau, boundaries, exclusive=False)
         expected_feats = torch.tensor([[0,0], [0.4651,0.4651], [1.1627, 1.1627]], device='cuda', dtype=torch.float)
         expected_transmittance = torch.tensor([[0.0],[0.0],[0.0],[0.2325],[0.0],[0.2325]], device='cuda', dtype=torch.float)
         assert torch.allclose(integrated_feats, expected_feats, atol=1e-4)
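The test pins `exclusive=False` so its expected values keep matching the old default. Callers upgrading past this commit can do the same. A hedged usage sketch (the example tensors are made up, the import alias mirrors the test file, and `mark_pack_boundaries` is used per the docstring above):

```python
import torch
from kaolin.render import spc as spc_render

# Made-up sample data: 5 samples spread over 2 packs (rays 0 and 1).
ridx = torch.tensor([0, 0, 0, 1, 1], device='cuda')
boundaries = spc_render.mark_pack_boundaries(ridx)
feats = torch.rand(5, 2, device='cuda')
tau = torch.rand(5, 1, device='cuda')

# Omitting the kwarg now means exclusive=True; pass exclusive=False
# explicitly to keep the pre-#495 behavior.
integrated_feats, transmittance = spc_render.exponential_integration(
    feats, tau, boundaries, exclusive=False)
```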

