
Refactor more code to adhere to a stricter set of warnings #439

Closed · wants to merge 2 commits
3 changes: 3 additions & 0 deletions source/.bazelrc
@@ -28,3 +28,6 @@ build:asan -c dbg --copt='-fsanitize=address' --copt='-fno-omit-frame-pointer' -

# For profiling builds
build:profile -c dbg --copt='-fno-omit-frame-pointer'

# Build with warnings as errors in CI
build:ci --copt='-Werror'
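
With Bazel's standard config mechanism, this lets CI opt into warnings-as-errors while local builds stay unaffected. A hypothetical invocation (the actual CI command line is not part of this diff):

    bazel build --config=ci //...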
51 changes: 51 additions & 0 deletions source/bazel/cc.bzl
@@ -0,0 +1,51 @@
# Build rules that let us pass in a set of warning flags for some targets
# without requiring us to pass them to all targets (e.g. third party libraries)

_warnings = [
"-Weverything",
"-Wno-c++98-compat",
"-Wno-c++98-compat-pedantic",
"-Wno-padded",

# This is a warning that a move would not have been applied on old compilers
# https://reviews.llvm.org/D43322
"-Wno-return-std-move-in-c++11",

# We need this for polymorphism with header only implementations
"-Wno-weak-vtables",

# We're okay with these for now, but should refactor to remove if we can
"-Wno-global-constructors",
"-Wno-exit-time-destructors",

# It's okay if we use `default` in switch statements
"-Wno-switch-enum",

# Flexible array members
"-Wno-c99-extensions",
]

_test_warnings = _warnings + [
# Test specific warnings and exceptions
]

def neuropod_cc_library(name, copts = [], **kwargs):
native.cc_library(
name = name,
copts = _warnings + copts,
**kwargs
)

def neuropod_cc_binary(name, copts = [], **kwargs):
native.cc_binary(
name = name,
copts = _warnings + copts,
**kwargs
)

def neuropod_cc_test(name, copts = [], **kwargs):
native.cc_test(
name = name,
copts = _test_warnings + copts,
**kwargs
)
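
As an illustration, a minimal sketch of a BUILD file using these macros in place of the native rules; the target and file names are hypothetical, not part of this change:

    load("//bazel:cc.bzl", "neuropod_cc_library")

    neuropod_cc_library(
        name = "example",  # hypothetical target
        srcs = ["example.cc"],
        hdrs = ["example.hh"],
        # Per-target copts land after the shared warning flags, so a later
        # -Wno-* flag can relax one warning for just this target.
        copts = ["-Wno-float-equal"],
    )

Since the macros forward **kwargs to the native rules, everything else (deps, visibility, linkopts, ...) works unchanged.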
9 changes: 5 additions & 4 deletions source/neuropod/BUILD
@@ -13,8 +13,9 @@
# limitations under the License.

load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("//bazel:cc.bzl", "neuropod_cc_binary", "neuropod_cc_library")

cc_library(
neuropod_cc_library(
name = "neuropod_hdrs",
hdrs = [
"neuropod.hh",
@@ -31,15 +32,15 @@ cc_library(
],
)

cc_library(
neuropod_cc_library(
name = "options",
hdrs = ["options.hh"],
visibility = [
"//visibility:public",
],
)

cc_binary(
neuropod_cc_binary(
name = "libneuropod.so",
linkopts = select({
"@bazel_tools//src/conditions:darwin": ["-Wl,-rpath,@loader_path"],
@@ -55,7 +56,7 @@ cc_binary(
],
)

cc_library(
neuropod_cc_library(
name = "neuropod_impl",
srcs = [
"neuropod.cc",
3 changes: 2 additions & 1 deletion source/neuropod/backends/BUILD
@@ -13,8 +13,9 @@
# limitations under the License.

load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("//bazel:cc.bzl", "neuropod_cc_library")

cc_library(
neuropod_cc_library(
name = "neuropod_backend",
srcs = [
"neuropod_backend.cc",
4 changes: 2 additions & 2 deletions source/neuropod/backends/neuropod_backend.cc
@@ -129,7 +129,7 @@ void validate_tensors_against_specs(const NeuropodValueMap & tensors,
}

// Validate the shape
for (int i = 0; i < spec.dims.size(); i++)
for (size_t i = 0; i < spec.dims.size(); i++)
{
auto dim = tensor->get_dims()[i];
auto expected = spec.dims[i];
@@ -318,7 +318,7 @@ std::unique_ptr<NeuropodValueMap> NeuropodBackend::infer_internal(const Neuropod
return out;
}

std::unique_ptr<NeuropodValueMap> NeuropodBackend::infer_internal(const NeuropodValueMap &inputs)
std::unique_ptr<NeuropodValueMap> NeuropodBackend::infer_internal(const NeuropodValueMap & /*unused*/)
{
NEUROPOD_ERROR("Backend implementations must provide a `infer_internal` implementation");
}
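
A note on the signature change above: dropping the parameter name (and leaving a /*unused*/ placeholder comment) silences Clang's -Wunused-parameter, which -Weverything enables, in this stub that concrete backends are expected to override.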
5 changes: 3 additions & 2 deletions source/neuropod/backends/python_bridge/BUILD
@@ -13,8 +13,9 @@
# limitations under the License.

load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("//bazel:cc.bzl", "neuropod_cc_binary", "neuropod_cc_library")

cc_binary(
neuropod_cc_binary(
name = "libneuropod_pythonbridge_backend.so",
srcs = [
"//neuropod:libneuropod.so",
@@ -33,7 +34,7 @@ cc_binary(
],
)

cc_library(
neuropod_cc_library(
name = "python_bridge",
srcs = [
"python_bridge.cc",
5 changes: 3 additions & 2 deletions source/neuropod/backends/tensorflow/BUILD
@@ -14,8 +14,9 @@

load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("//bazel:copy_libs.bzl", "copy_libs")
load("//bazel:cc.bzl", "neuropod_cc_binary", "neuropod_cc_library")

cc_binary(
neuropod_cc_binary(
name = "libneuropod_tensorflow_backend.so",
srcs = [
"//neuropod:libneuropod.so",
@@ -37,7 +38,7 @@ cc_binary(
],
)

cc_library(
neuropod_cc_library(
name = "tensorflow_backend",
srcs = [
"tf_backend.cc",
4 changes: 2 additions & 2 deletions source/neuropod/backends/tensorflow/tf_backend.cc
@@ -186,12 +186,12 @@ void TensorflowNeuropodBackend::load_model_internal()

// Create a buffer of the right size
graph_stream->seekg(0, graph_stream->end);
std::streamsize graph_length = graph_stream->tellg();
auto graph_length = static_cast<size_t>(graph_stream->tellg());
std::vector<char> buffer(graph_length);

// Read into the buffer
graph_stream->seekg(0, graph_stream->beg);
graph_stream->read(buffer.data(), graph_length);
graph_stream->read(buffer.data(), static_cast<std::streamsize>(graph_length));
if (graph_stream->fail())
{
NEUROPOD_ERROR("Error reading TensorFlow GraphDef for neuropod {}", neuropod_path_);
8 changes: 4 additions & 4 deletions source/neuropod/backends/tensorflow/tf_tensor.cc
@@ -81,7 +81,7 @@ class NeuropodTensorBuffer : public tensorflow::TensorBuffer

void FillAllocationDescription(tensorflow::AllocationDescription *proto) const override
{
tensorflow::int64 rb = size();
auto rb = static_cast<tensorflow::int64>(size());
proto->set_requested_bytes(rb);
proto->set_allocator_name(tensorflow::cpu_allocator()->Name());
}
@@ -116,11 +116,11 @@ tensorflow::TensorShape get_tf_shape(const std::vector<int64_t> &dims)
// Convert shapes
std::vector<int64_t> get_dims(const tensorflow::Tensor &tensor)
{
int num_dims = tensor.dims();
auto num_dims = static_cast<size_t>(tensor.dims());
std::vector<int64_t> shape(num_dims);
for (int i = 0; i < num_dims; i++)
for (size_t i = 0; i < num_dims; i++)
{
shape[i] = tensor.dim_size(i);
shape[i] = tensor.dim_size(static_cast<int>(i));
}

return shape;
12 changes: 6 additions & 6 deletions source/neuropod/backends/tensorflow/tf_tensor.hh
@@ -160,26 +160,26 @@ public:
void copy_from(const std::vector<std::string> &data)
{
auto flat = tensor_.flat<StringType>();
if (data.size() != flat.size())
if (data.size() != static_cast<size_t>(flat.size()))
{
NEUROPOD_ERROR("Supplied vector size ({}) does not match size of tensor ({})", data.size(), flat.size());
}

for (int i = 0; i < data.size(); i++)
for (size_t i = 0; i < data.size(); i++)
{
flat(i) = data[i];
flat(static_cast<long>(i)) = data[i];
}
}

tensorflow::Tensor &get_native_data() { return tensor_; }

protected:
std::string get(size_t index) const { return tensor_.flat<StringType>()(index); }
std::string get(size_t index) const { return tensor_.flat<StringType>()(static_cast<long>(index)); }

void set(size_t index, const std::string &value)
{
auto flat = tensor_.flat<StringType>();
flat(index) = value;
auto flat = tensor_.flat<StringType>();
flat(static_cast<long>(index)) = value;
}
};
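
The static_cast<long> calls in these accessors reflect that flat<StringType>() returns an Eigen tensor map indexed by a signed type (Eigen::DenseIndex, i.e. std::ptrdiff_t, a long on LP64 platforms), so indexing with an unsigned size_t would otherwise trip the same sign-conversion warnings.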

6 changes: 4 additions & 2 deletions source/neuropod/backends/torchscript/BUILD
@@ -14,8 +14,9 @@

load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
load("//bazel:copy_libs.bzl", "copy_libs")
load("//bazel:cc.bzl", "neuropod_cc_binary", "neuropod_cc_library")

cc_binary(
neuropod_cc_binary(
name = "libneuropod_torchscript_backend.so",
srcs = [
"//neuropod:libneuropod.so",
@@ -40,10 +41,11 @@ cc_binary(
],
)

cc_library(
neuropod_cc_library(
name = "torch_backend",
srcs = [
"torch_backend.cc",
"torch_tensor.cc",
"type_utils.cc",
],
hdrs = [
24 changes: 12 additions & 12 deletions source/neuropod/backends/torchscript/torch_backend.cc
@@ -35,15 +35,15 @@ namespace
{

#if CAFFE2_NIGHTLY_VERSION >= 20200115
#define MAKE_DICT(name, type) torch::Dict<std::string, type> name;
#define MAKE_DICT(name, type) torch::Dict<std::string, type> name
#elif CAFFE2_NIGHTLY_VERSION >= 20191010
#define MAKE_DICT(name, type) c10::impl::GenericDict name(c10::AnyType::get(), c10::AnyType::get());
#define MAKE_DICT(name, type) c10::impl::GenericDict name(c10::AnyType::get(), c10::AnyType::get())
#elif CAFFE2_NIGHTLY_VERSION >= 20190717
#define MAKE_DICT(name, type) c10::impl::GenericDict name((c10::impl::deprecatedUntypedDict()));
#define MAKE_DICT(name, type) c10::impl::GenericDict name((c10::impl::deprecatedUntypedDict()))
#elif CAFFE2_NIGHTLY_VERSION >= 20190601
#define MAKE_DICT(name, type) auto name = c10::make_dict<torch::jit::IValue, torch::jit::IValue>();
#define MAKE_DICT(name, type) auto name = c10::make_dict<torch::jit::IValue, torch::jit::IValue>()
#else
#define MAKE_DICT(name, type) torch::ivalue::UnorderedMap name;
#define MAKE_DICT(name, type) torch::ivalue::UnorderedMap name
#endif

#if CAFFE2_NIGHTLY_VERSION >= 20190717
@@ -55,11 +55,11 @@
#if CAFFE2_NIGHTLY_VERSION >= 20190601
#define KEY(elem) (elem.key())
#define VALUE(elem) (elem.value())
#define DICT_INSERT(dict, key, value) dict.insert(key, value);
#define DICT_INSERT(dict, key, value) dict.insert(key, value)
#else
#define KEY(elem) (elem.first)
#define VALUE(elem) (elem.second)
#define DICT_INSERT(dict, key, value) dict[key] = value;
#define DICT_INSERT(dict, key, value) dict[key] = value
#endif

std::shared_ptr<torch::jit::script::Module> load_model_from_path(std::istream & graph_stream,
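
All of the macro edits above drop a trailing semicolon from the expansion: with the semicolon baked into the macro, a call site written naturally as MAKE_DICT(torch_inputs, Tensor); expands to a doubled ;;, and the resulting empty statement is flagged under -Weverything (Clang's -Wextra-semi-stmt). Moving the semicolon to the call site keeps both the macro and its callers clean.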
@@ -133,8 +133,8 @@ void insert_value_in_output(NeuropodValueMap & output,
auto tensor = value.toTensor().to(torch::kCPU);

// Get the type and make a TorchNeuropodTensor
auto tensor_type = get_neuropod_type_from_torch_type(tensor.scalar_type());
auto neuropod_tensor = make_tensor<TorchNeuropodTensor>(tensor_type, tensor);
auto neuropod_tensor_type = get_neuropod_type_from_torch_type(tensor.scalar_type());
auto neuropod_tensor = make_tensor<TorchNeuropodTensor>(neuropod_tensor_type, tensor);

// Add it to our output
auto &to_set = output[name];
@@ -287,7 +287,7 @@ torch::Device TorchNeuropodBackend::get_torch_device(NeuropodDeviceType target_d
return torch::kCPU;
}

return torch::Device(torch::kCUDA, options_.visible_device);
return torch::Device(torch::kCUDA, static_cast<short>(options_.visible_device));
}

// Run inference
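
The static_cast<short> above is needed because torch::Device takes its ordinal as c10::DeviceIndex, a narrow signed integer (int16_t in libtorch builds of this era, though the exact typedef is an assumption here), while options_.visible_device is a wider int; the explicit narrowing keeps -Weverything's implicit-conversion warnings quiet.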
@@ -381,7 +381,7 @@ std::unique_ptr<NeuropodValueMap> TorchNeuropodBackend::infer_internal(const Neu
schema);
}

torch_inputs.at(arg_index.value() - (has_class_type ? 1 : 0)) = input_data;
torch_inputs.at(static_cast<size_t>(arg_index.value() - (has_class_type ? 1 : 0))) = input_data;
}
}

@@ -432,7 +432,7 @@ std::unique_ptr<NeuropodValueMap> TorchNeuropodBackend::infer_internal(const Neu
{
// This is a named tuple
// NOLINTNEXTLINE(modernize-loop-convert): Can't always use a range based loop here
for (int i = 0; i < elems.size(); i++)
for (size_t i = 0; i < elems.size(); i++)
{
insert_value_in_output(*to_return, GET_NAME(i), elems.at(i));
}
26 changes: 26 additions & 0 deletions source/neuropod/backends/torchscript/torch_tensor.cc
@@ -0,0 +1,26 @@
/* Copyright (c) 2020 UATC, LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#include "neuropod/backends/torchscript/torch_tensor.hh"

namespace neuropod
{

torch::jit::IValue get_ivalue_from_torch_tensor(const std::shared_ptr<NeuropodValue> &tensor)
{
return std::dynamic_pointer_cast<NativeDataContainer<torch::jit::IValue>>(tensor)->get_native_data();
}

} // namespace neuropod