Commit

Add feature interpolation to sample_points, allow texture_mapping to take 1D tensor as coordinates (NVIDIAGameWorks#499)

* Mesh sampling w/ UV texture interpolation

Signed-off-by: operel <[email protected]>

* fix tests according to new padding_mode

Signed-off-by: Clement Fuji Tsang <[email protected]>

* fix tetmesh docstring examples

Signed-off-by: Clement Fuji Tsang <[email protected]>

* fix docstring

Signed-off-by: Clement Fuji Tsang <[email protected]>

Co-authored-by: operel <[email protected]>
Signed-off-by: Alex Zook <[email protected]>
2 people authored and zookae committed Mar 30, 2022
1 parent 3ea885f commit 9e13bb0
Showing 13 changed files with 334 additions and 72 deletions.
4 changes: 2 additions & 2 deletions kaolin/metrics/tetmesh.py
@@ -35,7 +35,7 @@ def tetrahedron_volume(tet_vertices):
... [0.4750, 0.4500, 0.4500],
... [0.5000, 0.5000, 0.5000]]]])
>>> tetrahedron_volume(tet_vertices)
tensor([[2.0833e-05]])
tensor([[-2.0833e-05]])
"""
_validate_tet_vertices(tet_vertices)
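For context on the updated example value above, here is a hedged sketch (not necessarily kaolin's implementation) of a signed tetrahedron volume computed via the scalar triple product; the sign flips with the vertex ordering, which is consistent with the negative value now shown in the docstring.

# Hedged sketch only; kaolin's actual implementation may differ.
import torch

def signed_tet_volume(tet_vertices):
    # tet_vertices: (batch_size, num_tets, 4, 3)
    v0, v1, v2, v3 = torch.unbind(tet_vertices, dim=-2)
    edges = torch.stack([v1 - v0, v2 - v0, v3 - v0], dim=-2)  # (..., 3, 3)
    # V = det([v1 - v0, v2 - v0, v3 - v0]) / 6, negative for left-handed ordering
    return torch.linalg.det(edges) / 6.0                      # (batch_size, num_tets)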

@@ -88,7 +88,7 @@ def equivolume(tet_vertices, tetrahedrons_mean=None, pow=4):
... [0.5500, 0.3500, 0.9000]]]])
>>> equivolume(tet_vertices, pow=4)
tensor([[2.2898e-15],
[2.9661e-10]])
[1.5422e-09]])
"""
_validate_tet_vertices(tet_vertices)
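Because the body of equivolume is not shown in this diff, the following is only a rough, hedged sketch of an equivolume-style penalty, under the assumption that it penalizes the pow-th power of each tetrahedron's deviation from the mean volume; kaolin's exact formulation may differ.

# Hedged sketch only; the exact kaolin formula may differ.
import torch

def equivolume_sketch(volumes, volumes_mean=None, pow=4):
    # volumes: (batch_size, num_tets) per-tetrahedron volumes
    if volumes_mean is None:
        volumes_mean = volumes.mean(dim=-1, keepdim=True)
    # drive all tetrahedra toward equal volume
    return ((volumes - volumes_mean).abs() ** pow).mean(dim=-1, keepdim=True)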

127 changes: 98 additions & 29 deletions kaolin/ops/mesh/trianglemesh.py
Expand Up @@ -36,9 +36,32 @@ def _base_face_areas(face_vertices_0, face_vertices_1, face_vertices_2):

return areas

def _base_sample_points_selected_faces(face_vertices_0, face_vertices_1, face_vertices_2):
"""Base function to sample points."""
sampling_shape = tuple(int(d) for d in face_vertices_0.shape[:-1]) + (1,)
def _base_sample_points_selected_faces(face_vertices, face_features=None):
"""Base function to sample points over selected faces.
The coordinates of the face vertices are interpolated to generate new samples.
Args:
face_vertices (tuple of torch.Tensor):
Coordinates of vertices, corresponding to selected faces to sample from.
A tuple of 3 entries corresponding to each of the face vertices.
Each entry is a torch.Tensor of shape :math:`(\\text{batch_size}, \\text{num_samples}, 3)`.
face_features (tuple of torch.Tensor, Optional):
Features of face vertices, corresponding to selected faces to sample from.
A tuple of 3 entries corresponding to each of the face vertices.
Each entry is a torch.Tensor of shape
:math:`(\\text{batch_size}, \\text{num_samples}, \\text{feature_dim})`.
Returns:
(torch.Tensor, torch.Tensor):
Sampled point coordinates of shape :math:`(\\text{batch_size}, \\text{num_samples}, 3)`.
Sampled points interpolated features of shape
:math:`(\\text{batch_size}, \\text{num_samples}, \\text{feature_dim})`.
If `face_features` arg is not specified, the returned interpolated features are None.
"""

face_vertices0, face_vertices1, face_vertices2 = face_vertices

sampling_shape = tuple(int(d) for d in face_vertices0.shape[:-1]) + (1,)
# u controls how close the sample is to the edge (v1, v2) as opposed to v0.
# v controls where the sample lies along that edge, between v1 and v2.
#
@@ -47,18 +70,25 @@ def _base_sample_points_selected_faces(face_vertices_0, face_vertices_1, face_vertices_2):
# so using torch.sqrt we make a change of variable to have the desired density
# f_Y(y) = f_X(y ^ 2) * |d(y ^ 2) / dy| = 2y
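# (full derivation: if X ~ U(0, 1) and Y = sqrt(X), then P(Y <= y) = P(X <= y^2) = y^2,
#  hence f_Y(y) = 2y, the linear density in u needed for samples to be uniform
#  over the triangle's area)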
u = torch.sqrt(torch.rand(sampling_shape,
device=face_vertices_0.device,
dtype=face_vertices_0.dtype))
device=face_vertices0.device,
dtype=face_vertices0.dtype))

v = torch.rand(sampling_shape,
device=face_vertices_0.device,
dtype=face_vertices_0.dtype)
device=face_vertices0.device,
dtype=face_vertices0.dtype)
w0 = 1 - u
w1 = u * (1 - v)
w2 = u * v

points = w0 * face_vertices0 + w1 * face_vertices1 + w2 * face_vertices2

points = (1 - u) * face_vertices_0 + \
(u * (1 - v)) * face_vertices_1 + \
u * v * face_vertices_2
features = None
if face_features is not None:
face_features0, face_features1, face_features2 = face_features
features = w0 * face_features0 + w1 * face_features1 + \
w2 * face_features2

return points
return points, features
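To illustrate the square-root reparameterization used in the sampler above, here is a small self-contained check (illustrative only, not part of kaolin) that the resulting barycentric weights are valid:

# With u = sqrt(U(0,1)) and v = U(0,1), the weights (1 - u, u*(1 - v), u*v)
# are non-negative, sum to 1, and spread samples uniformly over a triangle.
import torch

u = torch.sqrt(torch.rand(10000, 1))
v = torch.rand(10000, 1)
weights = torch.cat([1 - u, u * (1 - v), u * v], dim=-1)       # (10000, 3)
assert torch.all(weights >= 0)
assert torch.allclose(weights.sum(dim=-1), torch.ones(10000))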

def face_areas(vertices, faces):
"""Compute the areas of each face of triangle meshes.
@@ -120,16 +150,21 @@ def packed_face_areas(vertices, first_idx_vertices, faces, num_faces_per_mesh):
return areas.view(-1)


def sample_points(vertices, faces, num_samples, areas=None):
def sample_points(vertices, faces, num_samples, areas=None, face_features=None):
"""Uniformly sample points over the surface of triangle meshes.
First, the face to sample from is randomly selected,
with the probability of selection proportional to the area of the face;
then the coordinates on the face are uniformly sampled.
If ``face_features`` is defined for the mesh faces,
the sampled points will be returned with interpolated features as well,
otherwise, no feature interpolation will occur.
Args:
vertices (torch.Tensor):
The vertices of the meshes, of shape :math:`(\\text{batch_size}, \\text{num_vertices}, 3)`.
The vertices of the meshes, of shape
:math:`(\\text{batch_size}, \\text{num_vertices}, 3)`.
faces (torch.LongTensor):
The faces of the mesh, of shape :math:`(\\text{num_faces}, 3)`.
num_samples (int):
@@ -138,32 +173,67 @@ def sample_points(vertices, faces, num_samples, areas=None):
The areas of each face, of shape :math:`(\\text{batch_size}, \\text{num_faces})`.
Can be precomputed for fast on-the-fly sampling;
will be computed if None (default).
face_features (torch.Tensor, optional):
Per-vertex-per-face features, matching ``faces`` order,
of shape :math:`(\\text{batch_size}, \\text{num_faces}, 3, \\text{feature_dim})`.
For example:
1. Texture uv coordinates would be of shape
:math:`(\\text{batch_size}, \\text{num_faces}, 3, 2)`.
2. RGB color values would be of shape
:math:`(\\text{batch_size}, \\text{num_faces}, 3, 3)`.
When specified, it is used to interpolate the features for new sampled points.
See also:
:func:`~kaolin.ops.mesh.index_vertices_by_faces` for converting features defined per vertex
into the per-vertex-per-face shape of :math:`(\\text{num_faces}, 3)`.
Returns:
(torch.Tensor, torch.LongTensor):
the pointclouds of shape :math:`(\\text{batch_size}, \\text{num_points}, 3)`,
and the indexes of the faces selected, of shape :math:`(\\text{batch_size}, \\text{num_points})`.
(torch.Tensor, torch.LongTensor, (optional) torch.Tensor):
the pointclouds of shape :math:`(\\text{batch_size}, \\text{num_samples}, 3)`,
and the indexes of the faces selected, of shape :math:`(\\text{batch_size}, \\text{num_samples})`.
If ``face_features`` arg is specified, then the interpolated features of sampled points of shape
:math:`(\\text{batch_size}, \\text{num_samples}, \\text{feature_dim})` are also returned.
"""
if faces.shape[-1] != 3:
raise NotImplementedError("sample_points is only implemented for triangle meshes")
faces_0, faces_1, faces_2 = torch.split(faces, 1, dim=1)
face_v_0 = torch.index_select(vertices, 1, faces_0.reshape(-1))
face_v_1 = torch.index_select(vertices, 1, faces_1.reshape(-1))
face_v_2 = torch.index_select(vertices, 1, faces_2.reshape(-1))
faces_0, faces_1, faces_2 = torch.split(faces, 1, dim=1) # (num_faces, 3) -> tuple of (num_faces,)
face_v_0 = torch.index_select(vertices, 1, faces_0.reshape(-1)) # (batch_size, num_faces, 3)
face_v_1 = torch.index_select(vertices, 1, faces_1.reshape(-1)) # (batch_size, num_faces, 3)
face_v_2 = torch.index_select(vertices, 1, faces_2.reshape(-1)) # (batch_size, num_faces, 3)

if areas is None:
areas = _base_face_areas(face_v_0, face_v_1, face_v_2).squeeze(-1)
face_dist = torch.distributions.Categorical(areas)
face_choices = face_dist.sample([num_samples]).transpose(0, 1)
_face_choices = face_choices.unsqueeze(-1).repeat(1, 1, 3)
v0 = torch.gather(face_v_0, 1, _face_choices)
v1 = torch.gather(face_v_1, 1, _face_choices)
v2 = torch.gather(face_v_2, 1, _face_choices)

points = _base_sample_points_selected_faces(v0, v1, v2)

return points, face_choices
v0 = torch.gather(face_v_0, 1, _face_choices) # (batch_size, num_samples, 3)
v1 = torch.gather(face_v_1, 1, _face_choices) # (batch_size, num_samples, 3)
v2 = torch.gather(face_v_2, 1, _face_choices) # (batch_size, num_samples, 3)
face_vertices_choices = (v0, v1, v2)

# If face features (e.g. UV coordinates) are available, interpolate them for the sampled points as well
face_features_choices = None
if face_features is not None:
feat_dim = face_features.shape[-1]
# (batch_size, num_samples) -> (batch_size, num_samples, 3, feat_dim)
_face_choices = face_choices[..., None, None].repeat(1, 1, 3, feat_dim)
face_features_choices = torch.gather(face_features, 1, _face_choices)
face_features_choices = tuple(
tmp_feat.squeeze(2) for tmp_feat in torch.split(face_features_choices, 1, dim=2))

points, point_features = _base_sample_points_selected_faces(
face_vertices_choices, face_features_choices)

if point_features is not None:
return points, face_choices, point_features
else:
return points, face_choices
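A hedged usage sketch of the new feature-interpolation path follows; the import paths and the 'bilinear' mode are assumptions based on the docstrings in this commit rather than something this diff shows directly.

# Hedged sketch: sample surface points together with interpolated UVs,
# then look the UVs up in a texture map.
import torch
from kaolin.ops.mesh import index_vertices_by_faces, sample_points
from kaolin.render.mesh import texture_mapping

vertices = torch.rand(1, 100, 3)                    # (batch_size, num_vertices, 3)
faces = torch.randint(0, 100, (50, 3))              # (num_faces, 3)
uvs = torch.rand(1, 100, 2)                         # per-vertex UV coordinates
face_uvs = index_vertices_by_faces(uvs, faces)      # (batch_size, num_faces, 3, 2)

points, face_idx, point_uvs = sample_points(
    vertices, faces, num_samples=2048, face_features=face_uvs)

texture = torch.rand(1, 3, 256, 256)                # (batch_size, num_channels, h', w')
colors = texture_mapping(point_uvs, texture, mode='bilinear')  # (1, 2048, 3)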

# TODO(cfujitsang): packed_sample_points can return a packed if `num_samples` is an iterable
# TODO(cfujitsang): add face_features as argument
def packed_sample_points(vertices, first_idx_vertices,
faces, num_faces_per_mesh, num_samples, areas=None):
"""Uniformly sample points over the surface of triangle meshes.
@@ -226,8 +296,7 @@ def packed_sample_points(vertices, first_idx_vertices,
v0 = torch.index_select(face_v_0, 0, merged_face_choices).reshape(batch_size, num_samples, 3)
v1 = torch.index_select(face_v_1, 0, merged_face_choices).reshape(batch_size, num_samples, 3)
v2 = torch.index_select(face_v_2, 0, merged_face_choices).reshape(batch_size, num_samples, 3)

points = _base_sample_points_selected_faces(v0, v1, v2)
points, _ = _base_sample_points_selected_faces((v0, v1, v2))

return points, merged_face_choices.reshape(batch_size, num_samples)

64 changes: 40 additions & 24 deletions kaolin/render/mesh/utils.py
@@ -1,4 +1,5 @@
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2019,20-21 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,44 +22,59 @@
from ... import ops

def texture_mapping(texture_coordinates, texture_maps, mode='nearest'):
r"""Interpolates texture_maps by texture_coordinates.
Note that opengl tex coord is different from pytorch's coord.
opengl coord ranges from 0 to 1, y axis is from bottom to top
and it supports circular mode(-0.1 is the same as 0.9)
pytorch coord ranges from -1 to 1, y axis is from top to bottom and does not support circular
filtering is the same as the mode parameter for torch.nn.functional.grid_sample.
r"""Interpolates texture_maps by dense or sparse texture_coordinates.
This function supports sampling texture coordinates for:
1. An entire 2D image
2. A sparse point cloud of texture coordinates.
Args:
texture_coordinates(torch.FloatTensor):
image texture coordinate, of shape :math:`(\text{batch_size}, h, w, 2)`
dense image texture coordinate, of shape :math:`(\text{batch_size}, h, w, 2)` or
sparse texture coordinate for points, of shape :math:`(\text{batch_size}, \text{num_points}, 2)`
Coordinates are expected to be normalized between [0, 1].
Note that OpenGL texture coordinates differ from PyTorch's:
OpenGL coordinates range from 0 to 1, the y axis goes from bottom to top,
and wrapping is supported (-0.1 is equivalent to 0.9),
while PyTorch coordinates range from -1 to 1, the y axis goes from top to bottom,
and wrapping is not supported.
Filtering behaves like the ``mode`` parameter of torch.nn.functional.grid_sample.
texture_maps(torch.FloatTensor):
textures, of shape :math:`(\text{batch_size}, 3, h', w')`.
Here, h' & w' is the height and width of texture maps
while h and w is height and width of rendered image.
For each pixel in the rendered image we use the coordinates in the texture_coordinates
to query corresponding RGB color in texture maps.
h' & w' could be different from h & w
textures of shape :math:`(\text{batch_size}, \text{num_channels}, h', w')`.
Here, :math:`h'` & :math:`w'` are the height and width of texture maps.
If ``texture_coordinates`` are dense image texture coordinates -
For each pixel in the rendered image we use its coordinates in
``texture_coordinates`` to query the corresponding value in ``texture_maps``.
Note that the height :math:`h` and width :math:`w` of the rendered image may differ from
:math:`h'` & :math:`w'`.
If ``texture_coordinates`` are sparse texture coordinates -
For each point in ``texture_coordinates`` we query the corresponding value in ``texture_maps``.
Returns:
(torch.FloatTensor): interpolated texture, of shape :math:`(\text{batch_size}, h, w, 3)`
(torch.FloatTensor):
interpolated texture of shape :math:`(\text{batch_size}, h, w, \text{num_channels})` or
interpolated texture of shape :math:`(\text{batch_size}, \text{num_points}, \text{num_channels})`
"""
batch_size = texture_coordinates.shape[0]
num_channels = texture_maps.shape[1]
_texture_coordinates = texture_coordinates.reshape(batch_size, -1, 1, 2)

# convert coordinates from the OpenGL convention to PyTorch's grid_sample convention;
# OpenGL texture coordinates outside [0, 1] would normally wrap around (remainder),
# here they are clamped to [0, 1] before being mapped to [-1, 1]
texture_coordinates = torch.clamp(texture_coordinates, 0., 1.)
texture_coordinates = texture_coordinates * 2 - 1 # [0, 1] to [-1, 1]
texture_coordinates[:, :, :, 1] = -texture_coordinates[:, :, :, 1] # reverse y
_texture_coordinates = torch.clamp(_texture_coordinates, 0., 1.)
_texture_coordinates = _texture_coordinates * 2 - 1 # [0, 1] to [-1, 1]
_texture_coordinates[:, :, :, 1] = -_texture_coordinates[:, :, :, 1] # reverse y

# sample
texture_interpolates = torch.nn.functional.grid_sample(texture_maps,
texture_coordinates,
_texture_coordinates,
mode=mode,
align_corners=False)
align_corners=False,
padding_mode='border')
texture_interpolates = texture_interpolates.permute(0, 2, 3, 1)

return texture_interpolates

return texture_interpolates.reshape(batch_size, *texture_coordinates.shape[1:-1], num_channels)
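Below is a hedged sketch of the two coordinate layouts the updated function accepts; shapes follow the docstring above, and the 'bilinear' mode is an assumption rather than the default.

# Hedged sketch of dense vs. sparse texture coordinates after this change.
import torch
from kaolin.render.mesh import texture_mapping

texture = torch.rand(2, 3, 128, 128)       # (batch_size, num_channels, h', w')

dense_uv = torch.rand(2, 64, 64, 2)        # per-pixel UVs
image = texture_mapping(dense_uv, texture, mode='bilinear')    # (2, 64, 64, 3)

sparse_uv = torch.rand(2, 500, 2)          # per-point UVs (the new sparse case)
colors = texture_mapping(sparse_uv, texture, mode='bilinear')  # (2, 500, 3)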

def spherical_harmonic_lighting(imnormal, lights):
r"""Creates lighting effects.