[Build] Refactor backends to adhere to stricter build warnings #442

Merged: 1 commit, Oct 1, 2020
6 changes: 6 additions & 0 deletions source/bazel/cc.bzl
@@ -17,6 +17,12 @@ _warnings = [
     # We're okay with these for now, but should refactor to remove if we can
     "-Wno-global-constructors",
     "-Wno-exit-time-destructors",
+
+    # It's okay if we use `default` in switch statements
+    "-Wno-switch-enum",
+
+    # Flexible array members
+    "-Wno-c99-extensions",
 ]

 _test_warnings = _warnings + [
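For context (not part of the diff): the `neuropod_cc_library` / `neuropod_cc_binary` macros that the BUILD files below switch to presumably wrap the stock Bazel rules and inject this `_warnings` list as copts. A minimal C++ sketch of what the two newly suppressed warnings would flag; the enum, function, and struct names here are illustrative, not from the codebase:

#include <cstddef>

enum class TensorType { FLOAT, DOUBLE, INT32, INT64, STRING };

// -Wswitch-enum warns whenever a switch over an enum does not list every
// enumerator explicitly, even when a `default` case handles the rest.
const char *type_name(TensorType t)
{
    switch (t)
    {
        case TensorType::STRING:
            return "string";
        default:
            return "numeric";
    }
}

// -Wc99-extensions fires on flexible array members, a C99 feature that
// clang accepts in C++ only as an extension.
struct Message
{
    std::size_t length;
    char        payload[]; // flexible array member
};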
3 changes: 2 additions & 1 deletion source/neuropod/backends/BUILD
@@ -13,8 +13,9 @@
 # limitations under the License.

 load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
+load("//bazel:cc.bzl", "neuropod_cc_library")

-cc_library(
+neuropod_cc_library(
     name = "neuropod_backend",
     srcs = [
         "neuropod_backend.cc",
4 changes: 2 additions & 2 deletions source/neuropod/backends/neuropod_backend.cc
@@ -129,7 +129,7 @@ void validate_tensors_against_specs(const NeuropodValueMap & tensors,
         }

         // Validate the shape
-        for (int i = 0; i < spec.dims.size(); i++)
+        for (size_t i = 0; i < spec.dims.size(); i++)
         {
             auto dim = tensor->get_dims()[i];
             auto expected = spec.dims[i];
@@ -318,7 +318,7 @@ std::unique_ptr<NeuropodValueMap> NeuropodBackend::infer_internal(const Neuropod
     return out;
 }

-std::unique_ptr<NeuropodValueMap> NeuropodBackend::infer_internal(const NeuropodValueMap &inputs)
+std::unique_ptr<NeuropodValueMap> NeuropodBackend::infer_internal(const NeuropodValueMap & /*unused*/)
 {
     NEUROPOD_ERROR("Backend implementations must provide a `infer_internal` implementation");
 }
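Two warning classes are at work in this file: the loop index change satisfies -Wsign-compare (`size()` returns an unsigned `size_t`), and commenting out the parameter name in the throwing default implementation satisfies -Wunused-parameter while keeping the signature intact. A self-contained sketch of both idioms, using stand-in types rather than the real Neuropod classes:

#include <cstddef>
#include <stdexcept>
#include <vector>

// -Wsign-compare: the index must be unsigned to compare against size().
void validate_dims(const std::vector<long> &actual, const std::vector<long> &expected)
{
    for (std::size_t i = 0; i < expected.size(); i++)
    {
        if (actual.at(i) != expected[i])
        {
            throw std::runtime_error("dimension mismatch");
        }
    }
}

// -Wunused-parameter: an intentionally unused name is commented out.
struct Backend
{
    virtual ~Backend() = default;
    virtual void infer(const std::vector<long> & /*unused*/)
    {
        throw std::runtime_error("subclasses must override infer()");
    }
};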
5 changes: 3 additions & 2 deletions source/neuropod/backends/python_bridge/BUILD
@@ -13,8 +13,9 @@
 # limitations under the License.

 load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
+load("//bazel:cc.bzl", "neuropod_cc_binary", "neuropod_cc_library")

-cc_binary(
+neuropod_cc_binary(
     name = "libneuropod_pythonbridge_backend.so",
     srcs = [
         "//neuropod:libneuropod.so",
@@ -33,7 +34,8 @@ cc_binary(
     ],
 )

-cc_library(
+neuropod_cc_library(
     name = "python_bridge",
     srcs = [
         "python_bridge.cc",
5 changes: 3 additions & 2 deletions source/neuropod/backends/tensorflow/BUILD
@@ -14,8 +14,9 @@

 load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
 load("//bazel:copy_libs.bzl", "copy_libs")
+load("//bazel:cc.bzl", "neuropod_cc_binary", "neuropod_cc_library")

-cc_binary(
+neuropod_cc_binary(
     name = "libneuropod_tensorflow_backend.so",
     srcs = [
         "//neuropod:libneuropod.so",
@@ -37,7 +38,7 @@ cc_binary(
     ],
 )

-cc_library(
+neuropod_cc_library(
     name = "tensorflow_backend",
     srcs = [
         "tf_backend.cc",
4 changes: 2 additions & 2 deletions source/neuropod/backends/tensorflow/tf_backend.cc
@@ -186,12 +186,12 @@ void TensorflowNeuropodBackend::load_model_internal()

     // Create a buffer of the right size
     graph_stream->seekg(0, graph_stream->end);
-    std::streamsize graph_length = graph_stream->tellg();
+    auto graph_length = static_cast<size_t>(graph_stream->tellg());
     std::vector<char> buffer(graph_length);

     // Read into the buffer
     graph_stream->seekg(0, graph_stream->beg);
-    graph_stream->read(buffer.data(), graph_length);
+    graph_stream->read(buffer.data(), static_cast<std::streamsize>(graph_length));
     if (graph_stream->fail())
     {
         NEUROPOD_ERROR("Error reading TensorFlow GraphDef for neuropod {}", neuropod_path_);
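The pattern here: `tellg()` yields a signed stream position while `std::vector` sizes are `size_t`, so each direction of the conversion gets an explicit cast under -Wsign-conversion. A standalone sketch of the read-whole-stream idiom under that constraint, assuming a seekable, healthy stream:

#include <cstddef>
#include <istream>
#include <vector>

// Read an entire stream into memory with warning-clean casts.
std::vector<char> read_all(std::istream &stream)
{
    stream.seekg(0, std::istream::end);
    // tellg() returns a signed position; vector wants size_t.
    auto length = static_cast<std::size_t>(stream.tellg());
    std::vector<char> buffer(length);

    stream.seekg(0, std::istream::beg);
    // read() takes a signed std::streamsize, so cast back explicitly.
    stream.read(buffer.data(), static_cast<std::streamsize>(length));
    return buffer;
}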
8 changes: 4 additions & 4 deletions source/neuropod/backends/tensorflow/tf_tensor.cc
@@ -81,7 +81,7 @@ class NeuropodTensorBuffer : public tensorflow::TensorBuffer

     void FillAllocationDescription(tensorflow::AllocationDescription *proto) const override
     {
-        tensorflow::int64 rb = size();
+        auto rb = static_cast<tensorflow::int64>(size());
         proto->set_requested_bytes(rb);
         proto->set_allocator_name(tensorflow::cpu_allocator()->Name());
     }
@@ -116,11 +116,11 @@ tensorflow::TensorShape get_tf_shape(const std::vector<int64_t> &dims)
 // Convert shapes
 std::vector<int64_t> get_dims(const tensorflow::Tensor &tensor)
 {
-    int num_dims = tensor.dims();
+    auto num_dims = static_cast<size_t>(tensor.dims());
     std::vector<int64_t> shape(num_dims);
-    for (int i = 0; i < num_dims; i++)
+    for (size_t i = 0; i < num_dims; i++)
     {
-        shape[i] = tensor.dim_size(i);
+        shape[i] = tensor.dim_size(static_cast<int>(i));
     }

     return shape;
12 changes: 6 additions & 6 deletions source/neuropod/backends/tensorflow/tf_tensor.hh
@@ -160,26 +160,26 @@ public:
     void copy_from(const std::vector<std::string> &data)
     {
         auto flat = tensor_.flat<StringType>();
-        if (data.size() != flat.size())
+        if (data.size() != static_cast<size_t>(flat.size()))
         {
             NEUROPOD_ERROR("Supplied vector size ({}) does not match size of tensor ({})", data.size(), flat.size());
         }

-        for (int i = 0; i < data.size(); i++)
+        for (size_t i = 0; i < data.size(); i++)
         {
-            flat(i) = data[i];
+            flat(static_cast<long>(i)) = data[i];
         }
     }

     tensorflow::Tensor &get_native_data() { return tensor_; }

 protected:
-    std::string get(size_t index) const { return tensor_.flat<StringType>()(index); }
+    std::string get(size_t index) const { return tensor_.flat<StringType>()(static_cast<long>(index)); }

     void set(size_t index, const std::string &value)
     {
-        auto flat = tensor_.flat<StringType>();
-        flat(index) = value;
+        auto flat = tensor_.flat<StringType>();
+        flat(static_cast<long>(index)) = value;
     }
 };
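The `static_cast<long>` calls exist because `tensor_.flat<T>()` returns an Eigen map, and Eigen indexes with a signed type (`Eigen::DenseIndex`, typically `long` or `ptrdiff_t`), so passing a `size_t` would trip -Wsign-conversion. A toy stand-in showing the same boundary; `Flat` here is illustrative, not the Eigen type:

#include <cstddef>
#include <string>
#include <vector>

// Stand-in for an Eigen-style map that uses a signed index type.
struct Flat
{
    explicit Flat(std::size_t n) : storage(n) {}
    std::vector<std::string> storage;
    long size() const { return static_cast<long>(storage.size()); }
    std::string &operator()(long i) { return storage[static_cast<std::size_t>(i)]; }
};

void copy_from(Flat &flat, const std::vector<std::string> &data)
{
    // Compare in unsigned space, index in signed space.
    if (data.size() != static_cast<std::size_t>(flat.size()))
    {
        return;
    }
    for (std::size_t i = 0; i < data.size(); i++)
    {
        flat(static_cast<long>(i)) = data[i];
    }
}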
5 changes: 3 additions & 2 deletions source/neuropod/backends/torchscript/BUILD
@@ -14,8 +14,9 @@

 load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
 load("//bazel:copy_libs.bzl", "copy_libs")
+load("//bazel:cc.bzl", "neuropod_cc_binary", "neuropod_cc_library")

-cc_binary(
+neuropod_cc_binary(
     name = "libneuropod_torchscript_backend.so",
     srcs = [
         "//neuropod:libneuropod.so",
@@ -40,7 +41,7 @@ cc_binary(
     ],
 )

-cc_library(
+neuropod_cc_library(
     name = "torch_backend",
     srcs = [
         "torch_backend.cc",
24 changes: 12 additions & 12 deletions source/neuropod/backends/torchscript/torch_backend.cc
@@ -35,15 +35,15 @@ namespace
 {

 #if CAFFE2_NIGHTLY_VERSION >= 20200115
-#define MAKE_DICT(name, type) torch::Dict<std::string, type> name;
+#define MAKE_DICT(name, type) torch::Dict<std::string, type> name
 #elif CAFFE2_NIGHTLY_VERSION >= 20191010
-#define MAKE_DICT(name, type) c10::impl::GenericDict name(c10::AnyType::get(), c10::AnyType::get());
+#define MAKE_DICT(name, type) c10::impl::GenericDict name(c10::AnyType::get(), c10::AnyType::get())
 #elif CAFFE2_NIGHTLY_VERSION >= 20190717
-#define MAKE_DICT(name, type) c10::impl::GenericDict name((c10::impl::deprecatedUntypedDict()));
+#define MAKE_DICT(name, type) c10::impl::GenericDict name((c10::impl::deprecatedUntypedDict()))
 #elif CAFFE2_NIGHTLY_VERSION >= 20190601
-#define MAKE_DICT(name, type) auto name = c10::make_dict<torch::jit::IValue, torch::jit::IValue>();
+#define MAKE_DICT(name, type) auto name = c10::make_dict<torch::jit::IValue, torch::jit::IValue>()
 #else
-#define MAKE_DICT(name, type) torch::ivalue::UnorderedMap name;
+#define MAKE_DICT(name, type) torch::ivalue::UnorderedMap name
 #endif

 #if CAFFE2_NIGHTLY_VERSION >= 20190717
@@ -55,11 +55,11 @@ namespace
 #if CAFFE2_NIGHTLY_VERSION >= 20190601
 #define KEY(elem) (elem.key())
 #define VALUE(elem) (elem.value())
-#define DICT_INSERT(dict, key, value) dict.insert(key, value);
+#define DICT_INSERT(dict, key, value) dict.insert(key, value)
 #else
 #define KEY(elem) (elem.first)
 #define VALUE(elem) (elem.second)
-#define DICT_INSERT(dict, key, value) dict[key] = value;
+#define DICT_INSERT(dict, key, value) dict[key] = value
 #endif

 std::shared_ptr<torch::jit::script::Module> load_model_from_path(std::istream & graph_stream,
@@ -133,8 +133,8 @@ void insert_value_in_output(NeuropodValueMap & output,
         auto tensor = value.toTensor().to(torch::kCPU);

         // Get the type and make a TorchNeuropodTensor
-        auto tensor_type = get_neuropod_type_from_torch_type(tensor.scalar_type());
-        auto neuropod_tensor = make_tensor<TorchNeuropodTensor>(tensor_type, tensor);
+        auto neuropod_tensor_type = get_neuropod_type_from_torch_type(tensor.scalar_type());
+        auto neuropod_tensor = make_tensor<TorchNeuropodTensor>(neuropod_tensor_type, tensor);

         // Add it to our output
         auto &to_set = output[name];
@@ -287,7 +287,7 @@ torch::Device TorchNeuropodBackend::get_torch_device(NeuropodDeviceType target_d
         return torch::kCPU;
     }

-    return torch::Device(torch::kCUDA, options_.visible_device);
+    return torch::Device(torch::kCUDA, static_cast<short>(options_.visible_device));
 }

 // Run inference
@@ -381,7 +381,7 @@ std::unique_ptr<NeuropodValueMap> TorchNeuropodBackend::infer_internal(const Neu
                 schema);
         }

-        torch_inputs.at(arg_index.value() - (has_class_type ? 1 : 0)) = input_data;
+        torch_inputs.at(static_cast<size_t>(arg_index.value() - (has_class_type ? 1 : 0))) = input_data;
     }
 }

@@ -432,7 +432,7 @@ std::unique_ptr<NeuropodValueMap> TorchNeuropodBackend::infer_internal(const Neu
     {
         // This is a named tuple
         // NOLINTNEXTLINE(modernize-loop-convert): Can't always use a range based loop here
-        for (int i = 0; i < elems.size(); i++)
+        for (size_t i = 0; i < elems.size(); i++)
         {
             insert_value_in_output(*to_return, GET_NAME(i), elems.at(i));
         }
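The MAKE_DICT and DICT_INSERT changes all do one thing: move the trailing semicolon out of the macro body and onto the call site. When a macro already ends in `;`, the natural-looking `MAKE_DICT(inputs, Tensor);` expands to a declaration followed by an empty statement, which stricter builds flag (e.g. clang's -Wextra-semi / -Wextra-semi-stmt). A minimal sketch of the idiom with an illustrative macro, not the real MAKE_DICT:

#include <map>
#include <string>

// Ends without ';' so the call site supplies it, like any declaration.
#define MAKE_MAP(name) std::map<std::string, int> name

int main()
{
    MAKE_MAP(counts); // expands to: std::map<std::string, int> counts;
    counts["a"] = 1;
    return counts["a"] - 1;
}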
23 changes: 14 additions & 9 deletions source/neuropod/backends/torchscript/torch_tensor.hh
@@ -29,6 +29,11 @@ limitations under the License.
 // If we're not building with a nightly relase of torch,
 // set the date to match the date of the official release
 #ifndef CAFFE2_NIGHTLY_VERSION
+#if CAFFE2_VERSION == 10100
+// The date of the official torch 1.1.0 release
+#define CAFFE2_NIGHTLY_VERSION 20190430
+#endif
+
 #if CAFFE2_VERSION == 10200
 // The date of the official torch 1.2.0 release
 #define CAFFE2_NIGHTLY_VERSION 20190808
@@ -67,27 +72,27 @@ T *get_data_from_torch_tensor(const torch::Tensor &tensor)
 }

 template <>
-uint16_t *get_data_from_torch_tensor(const torch::Tensor &tensor)
+[[maybe_unused]] uint16_t *get_data_from_torch_tensor(const torch::Tensor & /*unused*/)
 {
     NEUROPOD_ERROR("TorchScript doesn't support type uint16_t");
 }

 template <>
-uint32_t *get_data_from_torch_tensor(const torch::Tensor &tensor)
+[[maybe_unused]] uint32_t *get_data_from_torch_tensor(const torch::Tensor & /*unused*/)
 {
     NEUROPOD_ERROR("TorchScript doesn't support type uint32_t");
 }

 template <>
-uint64_t *get_data_from_torch_tensor(const torch::Tensor &tensor)
+[[maybe_unused]] uint64_t *get_data_from_torch_tensor(const torch::Tensor & /*unused*/)
 {
     NEUROPOD_ERROR("TorchScript doesn't support type uint64_t");
 }

-torch::Deleter get_torch_deleter(const Deleter &deleter, void *data)
+[[maybe_unused]] torch::Deleter get_torch_deleter(const Deleter &deleter, void *data)
 {
     auto handle = register_deleter(deleter, data);
-    return [handle](void *unused) { run_deleter(handle); };
+    return [handle](void * /*unused*/) { run_deleter(handle); };
 }

 } // namespace
@@ -116,7 +121,7 @@ public:
     }

     // Wrap an existing torch tensor
-    TorchNeuropodTensor(torch::Tensor tensor) : TypedNeuropodTensor<T>(tensor.sizes().vec()), tensor(tensor) {}
+    TorchNeuropodTensor(torch::Tensor t) : TypedNeuropodTensor<T>(t.sizes().vec()), tensor(t) {}

     ~TorchNeuropodTensor() = default;

@@ -147,7 +152,7 @@ protected:
         }
         else
         {
-            out = tensor.to(torch::Device(torch::kCUDA, device));
+            out = tensor.to(torch::Device(torch::kCUDA, static_cast<short>(device)));
         }

         return std::make_shared<TorchNeuropodTensor<T>>(std::move(out));
@@ -158,7 +163,7 @@
 #define ELEMENTS(collection) collection
 #define GET_STRING_FROM_LIST(item) item
 #else
-#define ELEMENTS(collectionptr) collectionptr->elements();
+#define ELEMENTS(collectionptr) collectionptr->elements()
 #define GET_STRING_FROM_LIST(item) item.toStringRef()
 #endif

@@ -254,7 +259,7 @@ protected:
 };

 // Utility function to get an IValue from a torch tensor
-torch::jit::IValue get_ivalue_from_torch_tensor(const std::shared_ptr<NeuropodValue> &tensor)
+inline torch::jit::IValue get_ivalue_from_torch_tensor(const std::shared_ptr<NeuropodValue> &tensor)
 {
     return std::dynamic_pointer_cast<NativeDataContainer<torch::jit::IValue>>(tensor)->get_native_data();
 }
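Two header-hygiene fixes run through this file: `[[maybe_unused]]` silences unused-function warnings for header-local helpers that not every including translation unit calls, and marking the free function `inline` avoids duplicate-symbol (ODR) errors once the header is pulled into more than one translation unit. A small sketch of both, with illustrative names:

#include <string>

// Without `inline`, every TU including this header would emit its own
// definition of greeting(), and linking two such TUs would fail.
inline std::string greeting()
{
    return "hello";
}

namespace
{
// Internal-linkage helper that some including TUs never call; the
// attribute documents that this is intentional and silences the warning.
[[maybe_unused]] int rarely_used()
{
    return 42;
}
} // namespace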
7 changes: 5 additions & 2 deletions source/neuropod/internal/backend_registration.hh
@@ -74,8 +74,11 @@ BackendFactoryFunction get_backend_for_type(const std::vector<BackendLoadSpec> &

 // A macro to easily define a backend
 // Example: REGISTER_NEUROPOD_BACKEND(SomeTensorflowBackend, "tensorflow", "1.13.1")
-#define REGISTER_NEUROPOD_BACKEND(CLS, type, version) \
-    bool is_registered_##CLS = register_backend(#CLS, type, version, createNeuropodBackend<CLS>);
+#define REGISTER_NEUROPOD_BACKEND(CLS, type, version)                                              \
+    namespace                                                                                      \
+    {                                                                                              \
+    bool is_registered_##CLS = register_backend(#CLS, type, version, createNeuropodBackend<CLS>); \
+    }

 // Utility macros
 #define STR_HELPER(x) #x
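Wrapping the registration variable in an anonymous namespace gives `is_registered_<CLS>` internal linkage, so two backends registered in different translation units cannot collide at link time, and the global no longer needs a prior declaration to satisfy -Wmissing-variable-declarations. A self-contained sketch of the self-registration pattern; `register_backend` here is a simplified stand-in for the real signature:

#include <iostream>
#include <string>

bool register_backend(const std::string &name)
{
    std::cout << "registered " << name << "\n";
    return true;
}

#define REGISTER_BACKEND(CLS)                              \
    namespace                                              \
    {                                                      \
    bool is_registered_##CLS = register_backend(#CLS);     \
    }

// Runs register_backend() during static initialization; the bool stays
// private to this translation unit.
REGISTER_BACKEND(FooBackend)

int main() {}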