Commit 227fa3e: c10::optional -> std::optional in pytorch/audio/src/libtorio/ffmpeg/stream_reader/stream_processor.h

Summary:
X-link: pytorch/audio#3792

`c10::optional` was switched to be an alias of `std::optional` after PyTorch moved to C++17. Let's eliminate `c10::optional` where we can.
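For context, this substitution is a no-op at the type level: once PyTorch required C++17, `c10::optional` became a plain alias of `std::optional`. A minimal, self-contained sketch of that relationship (simplified; not the literal contents of `c10/util/Optional.h`):

#include <optional>
#include <type_traits>

namespace c10 {
// Simplified: recent PyTorch defines c10::optional as a bare alias template.
template <typename T>
using optional = std::optional<T>;
using std::nullopt;
} // namespace c10

// Same type under both spellings, so the rename is purely mechanical.
static_assert(std::is_same_v<c10::optional<int>, std::optional<int>>);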

Reviewed By: albanD

Differential Revision: D57294285

fbshipit-source-id: b29f8f3d7ac8aee0546dcad71c1b0197278fa0e5
r-barnes authored and facebook-github-bot committed May 14, 2024
1 parent 512675c commit 227fa3e
Showing 5 changed files with 25 additions and 25 deletions.
include/cuda_utils.cuh (2 changes: 1 addition & 1 deletion)

@@ -72,7 +72,7 @@ template<
     template <typename U> class PtrTraits = at::DefaultPtrTraits,
     typename index_t = int64_t>
 static at::PackedTensorAccessor<scalar_t, dim, PtrTraits, index_t> packed_accessor_or_dummy(
-    const c10::optional<at::Tensor>& t) {
+    const std::optional<at::Tensor>& t) {
   if (!t.has_value()) {
     const std::vector<index_t> zeros(dim);
     return at::PackedTensorAccessor<scalar_t, dim, PtrTraits, index_t>(nullptr, zeros.data(), zeros.data());
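For illustration, a hedged usage sketch of the function above after the change; the shapes, dtype, and call site are invented for the example, and it assumes this header and ATen are on the include path:

#include <ATen/ATen.h>
#include "cuda_utils.cuh"

void example() {
  // Empty optional: the !has_value() branch returns a dummy accessor
  // over null data with all-zero sizes and strides.
  std::optional<at::Tensor> maybe_weight;
  auto dummy = packed_accessor_or_dummy<float, 1>(maybe_weight);

  // Populated optional: presumably unpacked into a real accessor.
  maybe_weight = at::ones({16}, at::TensorOptions().device(at::kCUDA).dtype(at::kFloat));
  auto real = packed_accessor_or_dummy<float, 1>(maybe_weight);
  (void)dummy;
  (void)real;
}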
include/inplace_abn.h (20 changes: 10 additions & 10 deletions)

@@ -36,34 +36,34 @@ void forward_cpu(
     at::Tensor& x,
     const at::Tensor& mean,
     const at::Tensor& var,
-    const c10::optional<at::Tensor>& weight,
-    const c10::optional<at::Tensor>& bias,
+    const std::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& bias,
     float eps,
     Activation activation,
     float activation_param);
 void forward_cuda(
     at::Tensor& x,
     const at::Tensor& mean,
     const at::Tensor& var,
-    const c10::optional<at::Tensor>& weight,
-    const c10::optional<at::Tensor>& bias,
+    const std::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& bias,
     float eps,
     Activation activation,
     float activation_param);

 std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> backward_reduce_cpu(
     const at::Tensor& y_act,
     const at::Tensor& dy_act,
-    const c10::optional<at::Tensor>& weight,
-    const c10::optional<at::Tensor>& bias,
+    const std::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& bias,
     float eps,
     Activation activation,
     float activation_param);
 std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> backward_reduce_cuda(
     const at::Tensor& y_act,
     const at::Tensor& dy_act,
-    const c10::optional<at::Tensor>& weight,
-    const c10::optional<at::Tensor>& bias,
+    const std::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& bias,
     float eps,
     Activation activation,
     float activation_param);

@@ -75,7 +75,7 @@ void backward_cpu(
     const at::Tensor& count,
     const at::Tensor& sum_dy,
     const at::Tensor& sum_xhat_dy,
-    const c10::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& weight,
     float eps);
 void backward_cuda(
     const at::Tensor& xhat,

@@ -84,7 +84,7 @@ void backward_cuda(
     const at::Tensor& count,
     const at::Tensor& sum_dy,
     const at::Tensor& sum_xhat_dy,
-    const c10::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& weight,
     float eps);

 /***********************************************************************************************************************
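The optional `weight`/`bias` parameters are what allow InPlace-ABN to run with or without an affine transform. A hedged sketch of both call styles (the tensor shapes and the `Activation` enumerator name are assumptions for the example):

#include <ATen/ATen.h>
#include "inplace_abn.h"

void example() {
  auto x    = at::randn({8, 32, 4, 4});  // NCHW activations
  auto mean = at::zeros({32});
  auto var  = at::ones({32});

  // Affine form: pass real per-channel weight and bias tensors.
  forward_cpu(x, mean, var, at::ones({32}), at::zeros({32}),
              /*eps=*/1e-5f, Activation::LeakyReLU, /*activation_param=*/0.01f);

  // Non-affine form: std::nullopt now flows where c10::nullopt used to.
  forward_cpu(x, mean, var, std::nullopt, std::nullopt,
              1e-5f, Activation::LeakyReLU, 0.01f);
}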
include/utils.h (2 changes: 1 addition & 1 deletion)

@@ -32,7 +32,7 @@ at::Tensor normalize_shape(const at::Tensor& x);

 template <typename scalar_t, int64_t dim>
 static at::TensorAccessor<scalar_t, dim> accessor_or_dummy(
-    const c10::optional<at::Tensor>& t) {
+    const std::optional<at::Tensor>& t) {
   if (!t.has_value()) {
     return at::TensorAccessor<scalar_t, dim>(nullptr, nullptr, nullptr);
   }
src/inplace_abn.cpp (12 changes: 6 additions & 6 deletions)

@@ -60,8 +60,8 @@ void forward(
     at::Tensor& x,
     const at::Tensor& mean,
     const at::Tensor& var,
-    const c10::optional<at::Tensor>& weight,
-    const c10::optional<at::Tensor>& bias,
+    const std::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& bias,
     float eps,
     Activation activation,
     float activation_param) {

@@ -96,8 +96,8 @@ void forward(
 std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> backward_reduce(
     const at::Tensor& y_act,
     const at::Tensor& dy_act,
-    const c10::optional<at::Tensor>& weight,
-    const c10::optional<at::Tensor>& bias,
+    const std::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& bias,
     float eps,
     Activation activation,
     float activation_param) {

@@ -143,7 +143,7 @@ void backward_train(
     const at::Tensor& count,
     const at::Tensor& sum_dy,
     const at::Tensor& sum_xhat_dy,
-    const c10::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& weight,
     float eps) {
   // Check dimensions and types
   IABN_CHECK(xhat.ndimension() >= 2, "xhat should have at least 2 dimensions");

@@ -176,7 +176,7 @@ void backward_train(
 at::Tensor backward_test(
     const at::Tensor& dy_,
     const at::Tensor& var,
-    const c10::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& weight,
     float eps) {
   // Check dimensions and types
   IABN_CHECK(dy_.ndimension() >= 2, "dy should have at least 2 dimensions");
src/inplace_abn_cpu.cpp (14 changes: 7 additions & 7 deletions)

@@ -25,8 +25,8 @@ template <typename scalar_t, Activation activation>
 std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> backward_reduce_impl(
     const at::Tensor& y_act_,
     const at::Tensor& dy_act_,
-    const c10::optional<at::Tensor>& weight_,
-    const c10::optional<at::Tensor>& bias_,
+    const std::optional<at::Tensor>& weight_,
+    const std::optional<at::Tensor>& bias_,
     float eps,
     float activation_param) {
   // Initialize output tensors

@@ -108,8 +108,8 @@ void forward_cpu(
     at::Tensor& x_,
     const at::Tensor& mean,
     const at::Tensor& var,
-    const c10::optional<at::Tensor>& weight,
-    const c10::optional<at::Tensor>& bias,
+    const std::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& bias,
     float eps,
     Activation activation,
     float activation_param) {

@@ -144,8 +144,8 @@ void forward_cpu(
 std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> backward_reduce_cpu(
     const at::Tensor& y_act,
     const at::Tensor& dy_act,
-    const c10::optional<at::Tensor>& weight,
-    const c10::optional<at::Tensor>& bias,
+    const std::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& bias,
     float eps,
     Activation activation,
     float activation_param) {

@@ -176,7 +176,7 @@ void backward_cpu(
     const at::Tensor& count,
     const at::Tensor& sum_dy,
     const at::Tensor& sum_xhat_dy,
-    const c10::optional<at::Tensor>& weight,
+    const std::optional<at::Tensor>& weight,
     float eps) {
   CHECK_NOT_HALF(xhat_);
