Commit

c10::optional -> std::optional in pytorch/audio/src/libtorio/ffmpeg/stream_reader/stream_processor.h +20

Summary:
X-link: pytorch/audio#3792

`c10::optional` was switched to be an alias of `std::optional` after PyTorch moved to C++17. Let's eliminate `c10::optional` where we can.
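
For context: after the C++17 move, `c10::optional<T>` is effectively just an alias for `std::optional<T>`, so this codemod is a pure spelling change at each call site. Below is a minimal, self-contained sketch of why that makes the rename safe; the `c10` namespace in it is a stand-in written for illustration, not the actual c10 header.

```cpp
#include <cstdint>
#include <iostream>
#include <optional>

// Illustrative stand-in for the post-C++17 c10 alias (not the real header).
namespace c10 {
template <typename T>
using optional = std::optional<T>;  // alias: the same type as std::optional<T>
inline constexpr std::nullopt_t nullopt = std::nullopt;
}  // namespace c10

// A function declared with the standard spelling...
int64_t draw(std::optional<int64_t> seed) {
  return seed.value_or(42);  // fall back to a default seed
}

int main() {
  c10::optional<int64_t> s = 7;             // ...accepts the aliased spelling directly
  std::cout << draw(s) << "\n";             // prints 7; no conversion takes place
  std::cout << draw(c10::nullopt) << "\n";  // prints 42
  return 0;
}
```

Assuming the real alias matches the sketch above, overload resolution and runtime behavior are identical before and after this diff.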

Reviewed By: albanD

Differential Revision: D57294285

fbshipit-source-id: b29f8f3d7ac8aee0546dcad71c1b0197278fa0e5
r-barnes authored and facebook-github-bot committed May 14, 2024
1 parent 9877559 commit 155edb6
Showing 3 changed files with 59 additions and 59 deletions.
38 changes: 19 additions & 19 deletions torchcsprng/csrc/csprng.cpp
@@ -26,7 +26,7 @@ static const auto TENSOR_DEVICE_TYPE_IS_NOT_SUPPORTED = "tensor device type is n

// ==================================================== Random ========================================================

Tensor& random_(Tensor& self, c10::optional<Generator> gen) {
Tensor& random_(Tensor& self, std::optional<Generator> gen) {
if (self.device().type() == DeviceType::CPU) {
return cpu::random_(self, gen);
#ifdef WITH_CUDA
@@ -39,7 +39,7 @@ Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to,
}

Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to,
c10::optional<Generator> gen) {
std::optional<Generator> gen) {
if (self.device().type() == DeviceType::CPU) {
return cpu::random_from_to(self, from, to, gen);
#ifdef WITH_CUDA
@@ -52,7 +52,7 @@ Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to,
}

Tensor& random_to(Tensor& self, int64_t to,
c10::optional<Generator> gen) {
std::optional<Generator> gen) {
if (self.device().type() == DeviceType::CPU) {
return cpu::random_to(self, to, gen);
#ifdef WITH_CUDA
@@ -66,7 +66,7 @@ Tensor& random_to(Tensor& self, int64_t to,

// ==================================================== Uniform =======================================================

Tensor& uniform_(Tensor& self, double from, double to, c10::optional<Generator> gen) {
Tensor& uniform_(Tensor& self, double from, double to, std::optional<Generator> gen) {
if (self.device().type() == DeviceType::CPU) {
return cpu::uniform_(self, from, to, gen);
#ifdef WITH_CUDA
@@ -80,7 +80,7 @@ Tensor& uniform_(Tensor& self, double from, double to, c10::optional<Generator>

// ==================================================== Normal ========================================================

Tensor& normal_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
Tensor& normal_(Tensor& self, double mean, double std, std::optional<Generator> gen) {
if (self.device().type() == DeviceType::CPU) {
return cpu::normal_(self, mean, std, gen);
#ifdef WITH_CUDA
@@ -92,7 +92,7 @@ Tensor& normal_(Tensor& self, double mean, double std, c10::optional<Generator>
}
}

Tensor& normal_Tensor_float_out(const Tensor& mean, double std, c10::optional<Generator> gen, Tensor& output) {
Tensor& normal_Tensor_float_out(const Tensor& mean, double std, std::optional<Generator> gen, Tensor& output) {
if (output.device().type() == DeviceType::CPU) {
return cpu::normal_Tensor_float_out(output, mean, std, gen);
#ifdef WITH_CUDA
@@ -104,7 +104,7 @@ Tensor& normal_Tensor_float_out(const Tensor& mean, double std, c10::optional<Ge
}
}

Tensor& normal_float_Tensor_out(double mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
Tensor& normal_float_Tensor_out(double mean, const Tensor& std, std::optional<Generator> gen, Tensor& output) {
if (output.device().type() == DeviceType::CPU) {
return cpu::normal_float_Tensor_out(output, mean, std, gen);
#ifdef WITH_CUDA
@@ -116,7 +116,7 @@ Tensor& normal_float_Tensor_out(double mean, const Tensor& std, c10::optional<Ge
}
}

Tensor& normal_Tensor_Tensor_out(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
Tensor& normal_Tensor_Tensor_out(const Tensor& mean, const Tensor& std, std::optional<Generator> gen, Tensor& output) {
if (output.device().type() == DeviceType::CPU) {
return cpu::normal_Tensor_Tensor_out(output, mean, std, gen);
#ifdef WITH_CUDA
@@ -128,7 +128,7 @@ Tensor& normal_Tensor_Tensor_out(const Tensor& mean, const Tensor& std, c10::opt
}
}

Tensor normal_Tensor_float(const Tensor& mean, double std, c10::optional<Generator> gen) {
Tensor normal_Tensor_float(const Tensor& mean, double std, std::optional<Generator> gen) {
if (mean.device().type() == DeviceType::CPU) {
return cpu::normal_Tensor_float(mean, std, gen);
#ifdef WITH_CUDA
@@ -140,7 +140,7 @@ Tensor normal_Tensor_float(const Tensor& mean, double std, c10::optional<Generat
}
}

Tensor normal_float_Tensor(double mean, const Tensor& std, c10::optional<Generator> gen) {
Tensor normal_float_Tensor(double mean, const Tensor& std, std::optional<Generator> gen) {
if (std.device().type() == DeviceType::CPU) {
return cpu::normal_float_Tensor(mean, std, gen);
#ifdef WITH_CUDA
@@ -152,7 +152,7 @@ Tensor normal_float_Tensor(double mean, const Tensor& std, c10::optional<Generat
}
}

Tensor normal_Tensor_Tensor(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
Tensor normal_Tensor_Tensor(const Tensor& mean, const Tensor& std, std::optional<Generator> gen) {
if (mean.device().type() == DeviceType::CPU) {
return cpu::normal_Tensor_Tensor(mean, std, gen);
#ifdef WITH_CUDA
@@ -166,7 +166,7 @@ Tensor normal_Tensor_Tensor(const Tensor& mean, const Tensor& std, c10::optional

// ==================================================== Cauchy ========================================================

Tensor& cauchy_(Tensor& self, double median, double sigma, c10::optional<Generator> gen) {
Tensor& cauchy_(Tensor& self, double median, double sigma, std::optional<Generator> gen) {
if (self.device().type() == DeviceType::CPU) {
return cpu::cauchy_(self, median, sigma, gen);
#ifdef WITH_CUDA
@@ -180,7 +180,7 @@ Tensor& cauchy_(Tensor& self, double median, double sigma, c10::optional<Generat

// ================================================== LogNormal =======================================================

Tensor& log_normal_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
Tensor& log_normal_(Tensor& self, double mean, double std, std::optional<Generator> gen) {
if (self.device().type() == DeviceType::CPU) {
return cpu::log_normal_(self, mean, std, gen);
#ifdef WITH_CUDA
@@ -194,7 +194,7 @@ Tensor& log_normal_(Tensor& self, double mean, double std, c10::optional<Generat

// ================================================== Geometric =======================================================

Tensor& geometric_(Tensor& self, double p, c10::optional<Generator> gen) {
Tensor& geometric_(Tensor& self, double p, std::optional<Generator> gen) {
if (self.device().type() == DeviceType::CPU) {
return cpu::geometric_(self, p, gen);
#ifdef WITH_CUDA
@@ -208,7 +208,7 @@ Tensor& geometric_(Tensor& self, double p, c10::optional<Generator> gen) {

// ================================================== Exponential =====================================================

Tensor& exponential_(Tensor& self, double lambda, c10::optional<Generator> gen) {
Tensor& exponential_(Tensor& self, double lambda, std::optional<Generator> gen) {
if (self.device().type() == DeviceType::CPU) {
return cpu::exponential_(self, lambda, gen);
#ifdef WITH_CUDA
@@ -248,7 +248,7 @@ namespace {
}

template <typename scalar_t, typename RNG>
void randperm(Tensor& result, int64_t n, c10::optional<at::Generator> generator) {
void randperm(Tensor& result, int64_t n, std::optional<at::Generator> generator) {
auto gen = at::check_generator<RNG>(generator);
scalar_t *r__data = result.data_ptr<scalar_t>();

@@ -271,7 +271,7 @@ namespace {
}
} // namespace

Tensor& randperm_generator_out(int64_t n, c10::optional<Generator> generator, Tensor& result) {
Tensor& randperm_generator_out(int64_t n, std::optional<Generator> generator, Tensor& result) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
check_supported_max_int_with_precision(n, result);
if (result.device().type() == at::kCUDA) {
@@ -317,15 +317,15 @@ Tensor decrypt_pybind(Tensor input, Tensor output, Tensor key, const std::string

// ====================================================================================================================

Generator create_random_device_generator(c10::optional<std::string> token = c10::nullopt) {
Generator create_random_device_generator(std::optional<std::string> token = c10::nullopt) {
if (token.has_value()) {
return make_generator<CSPRNGGeneratorImpl>(*token);
} else {
return make_generator<CSPRNGGeneratorImpl>(true);
}
}

Generator create_mt19937_generator(c10::optional<uint64_t> seed = c10::nullopt) {
Generator create_mt19937_generator(std::optional<uint64_t> seed = c10::nullopt) {
if (seed.has_value()) {
return make_generator<CSPRNGGeneratorImpl>(*seed);
} else {
50 changes: 25 additions & 25 deletions torchcsprng/csrc/kernels_body.inc
@@ -11,7 +11,7 @@
// using `generator`, which must be an instance of `at::CPUGeneratorImpl`
// and passes it to the `device`.
template<typename RNG>
at::Tensor key_tensor(size_t block_t_size, c10::optional<at::Generator> generator) {
at::Tensor key_tensor(size_t block_t_size, std::optional<at::Generator> generator) {
std::lock_guard<std::mutex> lock(generator->mutex());
auto gen = at::check_generator<RNG>(generator);
auto key = torch::empty({static_cast<signed long>(block_t_size)}, torch::kUInt8);
@@ -114,7 +114,7 @@ template <> struct UIntType<bool> { using type = uint32_t; };

template<typename RNG>
struct RandomKernel {
void operator()(TensorIterator& iter, c10::optional<Generator> generator) {
void operator()(TensorIterator& iter, std::optional<Generator> generator) {
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
const auto key = key_t.data_ptr<uint8_t>();
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "random_kernel", [&] {
@@ -150,7 +150,7 @@ void random_full_range_kernel_helper(TensorIterator& iter, const uint8_t* key) {

template<typename RNG>
struct RandomFromToKernel {
void operator()(TensorIterator& iter, uint64_t range, int64_t base, c10::optional<Generator> generator) {
void operator()(TensorIterator& iter, uint64_t range, int64_t base, std::optional<Generator> generator) {
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
const auto key = key_t.data_ptr<uint8_t>();
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "random_from_to_kernel", [&] {
@@ -166,7 +166,7 @@ struct RandomFromToKernel {
}
});
}
void operator()(TensorIterator& iter, c10::optional<Generator> generator) {
void operator()(TensorIterator& iter, std::optional<Generator> generator) {
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
const auto key = key_t.data_ptr<uint8_t>();
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "random_full_64_bits_range_kernel", [&] {
@@ -183,23 +183,23 @@ struct RandomFromToKernel {
}
};

at::Tensor& random_(at::Tensor& self, c10::optional<at::Generator> generator) {
at::Tensor& random_(at::Tensor& self, std::optional<at::Generator> generator) {
return at::native::templates::random_impl<RandomKernel, CSPRNGGeneratorImpl>(self, generator);
}

at::Tensor& random_from_to(at::Tensor& self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
at::Tensor& random_from_to(at::Tensor& self, int64_t from, std::optional<int64_t> to, c10::optional<at::Generator> generator) {
return at::native::templates::random_from_to_impl<RandomFromToKernel, CSPRNGGeneratorImpl>(self, from, to, generator);
}

at::Tensor& random_to(at::Tensor& self, int64_t to, c10::optional<at::Generator> generator) {
at::Tensor& random_to(at::Tensor& self, int64_t to, std::optional<at::Generator> generator) {
return random_from_to(self, 0, to, generator);
}

// ==================================================== Uniform =======================================================

template<typename RNG>
struct UniformKernel {
void operator()(TensorIterator& iter, double from, double to, c10::optional<Generator> generator) {
void operator()(TensorIterator& iter, double from, double to, std::optional<Generator> generator) {
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
const auto key = key_t.data_ptr<uint8_t>();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "uniform_kernel", [&] {
@@ -213,15 +213,15 @@ struct UniformKernel {
}
};

at::Tensor& uniform_(at::Tensor& self, double from, double to, c10::optional<at::Generator> generator) {
at::Tensor& uniform_(at::Tensor& self, double from, double to, std::optional<at::Generator> generator) {
return at::native::templates::uniform_impl_<UniformKernel, CSPRNGGeneratorImpl>(self, from, to, generator);
}

// ==================================================== Normal ========================================================

template<typename RNG>
struct NormalKernel {
void operator()(Tensor& self, double mean, double std, c10::optional<Generator> generator) {
void operator()(Tensor& self, double mean, double std, std::optional<Generator> generator) {
auto iter = TensorIterator::nullary_op(self);
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
const auto key = key_t.data_ptr<uint8_t>();
@@ -236,39 +236,39 @@ struct NormalKernel {
}
};

at::Tensor& normal_(at::Tensor& self, double mean, double std, c10::optional<at::Generator> generator) {
at::Tensor& normal_(at::Tensor& self, double mean, double std, std::optional<at::Generator> generator) {
return at::native::templates::normal_impl_<NormalKernel, CSPRNGGeneratorImpl>(self, mean, std, generator);
}

at::Tensor& normal_Tensor_float_out(at::Tensor& output, const at::Tensor& mean, double std, c10::optional<at::Generator> gen) {
at::Tensor& normal_Tensor_float_out(at::Tensor& output, const at::Tensor& mean, double std, std::optional<at::Generator> gen) {
return at::native::templates::normal_out_impl<NormalKernel, CSPRNGGeneratorImpl>(output, mean, std, gen);
}

at::Tensor& normal_float_Tensor_out(at::Tensor& output, double mean, const at::Tensor& std, c10::optional<at::Generator> gen) {
at::Tensor& normal_float_Tensor_out(at::Tensor& output, double mean, const at::Tensor& std, std::optional<at::Generator> gen) {
return at::native::templates::normal_out_impl<NormalKernel, CSPRNGGeneratorImpl>(output, mean, std, gen);
}

at::Tensor& normal_Tensor_Tensor_out(at::Tensor& output, const at::Tensor& mean, const at::Tensor& std, c10::optional<at::Generator> gen) {
at::Tensor& normal_Tensor_Tensor_out(at::Tensor& output, const at::Tensor& mean, const at::Tensor& std, std::optional<at::Generator> gen) {
return at::native::templates::normal_out_impl<NormalKernel, CSPRNGGeneratorImpl>(output, mean, std, gen);
}

at::Tensor normal_Tensor_float(const at::Tensor& mean, double std, c10::optional<at::Generator> gen) {
at::Tensor normal_Tensor_float(const at::Tensor& mean, double std, std::optional<at::Generator> gen) {
return at::native::templates::normal_impl<NormalKernel, CSPRNGGeneratorImpl>(mean, std, gen);
}

at::Tensor normal_float_Tensor(double mean, const at::Tensor& std, c10::optional<at::Generator> gen) {
at::Tensor normal_float_Tensor(double mean, const at::Tensor& std, std::optional<at::Generator> gen) {
return at::native::templates::normal_impl<NormalKernel, CSPRNGGeneratorImpl>(mean, std, gen);
}

at::Tensor normal_Tensor_Tensor(const at::Tensor& mean, const at::Tensor& std, c10::optional<at::Generator> gen) {
at::Tensor normal_Tensor_Tensor(const at::Tensor& mean, const at::Tensor& std, std::optional<at::Generator> gen) {
return at::native::templates::normal_impl<NormalKernel, CSPRNGGeneratorImpl>(mean, std, gen);
}

// ==================================================== Cauchy ========================================================

template<typename RNG>
struct CauchyKernel {
void operator()(TensorIterator& iter, double median, double sigma, c10::optional<Generator> generator) {
void operator()(TensorIterator& iter, double median, double sigma, std::optional<Generator> generator) {
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
const auto key = key_t.data_ptr<uint8_t>();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "cauchy_kernel", [&] {
@@ -282,15 +282,15 @@ struct CauchyKernel {
}
};

at::Tensor& cauchy_(at::Tensor& self, double median, double sigma, c10::optional<at::Generator> generator) {
at::Tensor& cauchy_(at::Tensor& self, double median, double sigma, std::optional<at::Generator> generator) {
return at::native::templates::cauchy_impl_<CauchyKernel, CSPRNGGeneratorImpl>(self, median, sigma, generator);
}

// ================================================== LogNormal =======================================================

template<typename RNG>
struct LogNormalKernel {
void operator()(TensorIterator& iter, double mean, double std, c10::optional<Generator> generator) {
void operator()(TensorIterator& iter, double mean, double std, std::optional<Generator> generator) {
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
const auto key = key_t.data_ptr<uint8_t>();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "log_normal", [&] {
@@ -304,15 +304,15 @@ struct LogNormalKernel {
}
};

at::Tensor& log_normal_(at::Tensor& self, double mean, double std, c10::optional<at::Generator> gen) {
at::Tensor& log_normal_(at::Tensor& self, double mean, double std, std::optional<at::Generator> gen) {
return at::native::templates::log_normal_impl_<LogNormalKernel, CSPRNGGeneratorImpl>(self, mean, std, gen);
}

// ================================================== Geometric =======================================================

template<typename RNG>
struct GeometricKernel {
void operator()(TensorIterator& iter, double p, c10::optional<Generator> generator) {
void operator()(TensorIterator& iter, double p, std::optional<Generator> generator) {
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
const auto key = key_t.data_ptr<uint8_t>();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "geometric_kernel", [&] {
@@ -326,15 +326,15 @@ struct GeometricKernel {
}
};

at::Tensor& geometric_(at::Tensor& self, double p, c10::optional<at::Generator> gen) {
at::Tensor& geometric_(at::Tensor& self, double p, std::optional<at::Generator> gen) {
return at::native::templates::geometric_impl_<GeometricKernel, CSPRNGGeneratorImpl>(self, p, gen);
}

// ================================================== Exponential =====================================================

template<typename RNG>
struct ExponentialKernel {
void operator()(TensorIterator& iter, double lambda, c10::optional<Generator> generator) {
void operator()(TensorIterator& iter, double lambda, std::optional<Generator> generator) {
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
const auto key = key_t.data_ptr<uint8_t>();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exponential_kernel", [&] {
@@ -348,7 +348,7 @@ struct ExponentialKernel {
}
};

at::Tensor& exponential_(at::Tensor& self, double lambda, c10::optional<at::Generator> gen) {
at::Tensor& exponential_(at::Tensor& self, double lambda, std::optional<at::Generator> gen) {
return at::native::templates::exponential_impl_<ExponentialKernel, CSPRNGGeneratorImpl>(self, lambda, gen);
}
