Fix various using namespace issues
Summary:
X-link: facebookresearch/projectaria_tools#136

Summary
#buildsonlynotests - No runtime effects!
 - If you approve of this diff, please use the "Accept & Ship" button
ig-no-test - Doesn't require Instagram testing.

Reviewed By: dmm-fb

Differential Revision: D62898268
r-barnes authored and facebook-github-bot committed Sep 18, 2024
1 parent 26c736e commit 8026b85
Showing 3 changed files with 87 additions and 75 deletions.
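The change follows the hygiene rule the commit title names: a `using namespace` directive in a header is injected into every translation unit that includes it, where it can silently change name lookup and cause ambiguity errors far from the header itself. A minimal sketch of the failure mode (hypothetical names, not code from this diff; it intentionally fails to compile):

```cpp
// Hypothetical single-file demo of why `using namespace` in a header is
// dangerous. Imagine the first half is a header and the second half is a
// .cpp file that includes it.

namespace gfx {
struct Tensor {};
} // namespace gfx

using namespace gfx; // the "header" leaks gfx into every includer

// --- "client .cpp" below ---
struct Tensor {}; // the client's own, unrelated type

int main() {
  // error: reference to 'Tensor' is ambiguous (::Tensor vs. gfx::Tensor),
  // even though this file never asked for gfx's names.
  Tensor t;
  (void)t;
  return 0;
}
```

The fix applied in this diff is the conventional one: test_utils.h drops its using-directive and fully qualifies the names it uses, while each .cpp file that wants the shorthand declares `using namespace vkcompute;` locally, where it is confined to a single translation unit.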
2 changes: 2 additions & 0 deletions backends/vulkan/test/utils/test_utils.cpp
@@ -15,6 +15,8 @@
 #include <cassert>
 #include <random>
 
+using namespace vkcompute;
+
 //
 // Operator Recording Functions
 //
159 changes: 84 additions & 75 deletions backends/vulkan/test/utils/test_utils.h
@@ -16,37 +16,39 @@
 #include <executorch/backends/vulkan/runtime/graph/ops/utils/ShaderNameUtils.h>
 #include <executorch/backends/vulkan/runtime/graph/ops/utils/StagingUtils.h>
 
-using namespace vkcompute;
-
 #define CREATE_FLOAT_TEXTURE(sizes, allocate_memory) \
-  api::vTensor( \
-      api::context(), \
+  vkcompute::api::vTensor( \
+      vkcompute::api::context(), \
       sizes, \
       vkapi::kFloat, \
       utils::StorageType::TEXTURE_3D, \
       utils::GPUMemoryLayout::TENSOR_CHANNELS_PACKED, \
       allocate_memory);
 
 #define CREATE_FLOAT_BUFFER(sizes, allocate_memory) \
-  api::vTensor( \
-      api::context(), \
+  vkcompute::api::vTensor( \
+      vkcompute::api::context(), \
       sizes, \
       vkapi::kFloat, \
       utils::StorageType::BUFFER, \
       utils::GPUMemoryLayout::TENSOR_WIDTH_PACKED, \
       allocate_memory);
 
-#define DEFINE_STAGING_BUFFER_AND_RECORD_TO_GPU_FOR(tensor) \
-  api::StagingBuffer staging_buffer_##tensor( \
-      api::context(), vkapi::kFloat, tensor.staging_buffer_numel()); \
-  record_nchw_to_image_op( \
-      api::context(), staging_buffer_##tensor.buffer(), tensor);
-
-#define DEFINE_STAGING_BUFFER_AND_RECORD_FROM_GPU_FOR(tensor) \
-  api::StagingBuffer staging_buffer_##tensor( \
-      api::context(), vkapi::kFloat, tensor.staging_buffer_numel()); \
-  record_image_to_nchw_op( \
-      api::context(), tensor, staging_buffer_##tensor.buffer());
+#define DEFINE_STAGING_BUFFER_AND_RECORD_TO_GPU_FOR(tensor) \
+  vkcompute::api::StagingBuffer staging_buffer_##tensor( \
+      vkcompute::api::context(), \
+      vkapi::kFloat, \
+      tensor.staging_buffer_numel()); \
+  record_nchw_to_image_op( \
+      vkcompute::api::context(), staging_buffer_##tensor.buffer(), tensor);
+
+#define DEFINE_STAGING_BUFFER_AND_RECORD_FROM_GPU_FOR(tensor) \
+  vkcompute::api::StagingBuffer staging_buffer_##tensor( \
+      vkcompute::api::context(), \
+      vkapi::kFloat, \
+      tensor.staging_buffer_numel()); \
+  record_image_to_nchw_op( \
+      vkcompute::api::context(), tensor, staging_buffer_##tensor.buffer());
 
 #define CHECK_VALUE(data, idx, expected) \
   do { \
@@ -63,76 +65,80 @@ using namespace vkcompute;
 //
 
 void record_nchw_to_buffer_op(
-    api::Context* const context,
-    vkapi::VulkanBuffer& src_buffer,
-    api::vTensor& v_dst);
+    vkcompute::api::Context* const context,
+    vkcompute::vkapi::VulkanBuffer& src_buffer,
+    vkcompute::api::vTensor& v_dst);
 
 void record_buffer_to_nchw_op(
-    api::Context* const context,
-    api::vTensor& v_src,
-    vkapi::VulkanBuffer& dst_buffer);
+    vkcompute::api::Context* const context,
+    vkcompute::api::vTensor& v_src,
+    vkcompute::vkapi::VulkanBuffer& dst_buffer);
 
 void record_nchw_to_image_op(
-    api::Context* const context,
-    vkapi::VulkanBuffer& src_buffer,
-    api::vTensor& v_dst);
+    vkcompute::api::Context* const context,
+    vkcompute::vkapi::VulkanBuffer& src_buffer,
+    vkcompute::api::vTensor& v_dst);
 
 void record_image_to_nchw_op(
-    api::Context* const context,
-    api::vTensor& v_src,
-    vkapi::VulkanBuffer& dst_buffer);
+    vkcompute::api::Context* const context,
+    vkcompute::api::vTensor& v_src,
+    vkcompute::vkapi::VulkanBuffer& dst_buffer);
 
 void record_int8_image_to_nchw_noint8_op(
-    api::Context* const context,
-    api::vTensor& v_src,
-    api::StagingBuffer& dst_buffer);
+    vkcompute::api::Context* const context,
+    vkcompute::api::vTensor& v_src,
+    vkcompute::api::StagingBuffer& dst_buffer);
 
 void record_conv2d_prepack_weights_op(
-    api::Context* const context,
-    vkapi::VulkanBuffer& src_buffer,
-    api::vTensor& v_dst,
+    vkcompute::api::Context* const context,
+    vkcompute::vkapi::VulkanBuffer& src_buffer,
+    vkcompute::api::vTensor& v_dst,
     const std::vector<int64_t>& original_sizes,
     const bool transposed);
 
 void record_binary_op(
-    api::Context* const context,
+    vkcompute::api::Context* const context,
     const std::string& op_name,
-    api::vTensor& v_in1,
-    api::vTensor& v_in2,
-    api::vTensor& v_dst);
+    vkcompute::api::vTensor& v_in1,
+    vkcompute::api::vTensor& v_in2,
+    vkcompute::api::vTensor& v_dst);
 
 void execute_and_check_add(
-    api::vTensor& a,
-    api::vTensor& b,
-    api::vTensor& c,
+    vkcompute::api::vTensor& a,
+    vkcompute::api::vTensor& b,
+    vkcompute::api::vTensor& c,
     float a_val,
     float b_val);
 
-void record_index_fill_buffer(api::Context* const context, api::vTensor& v_ten);
+void record_index_fill_buffer(
+    vkcompute::api::Context* const context,
+    vkcompute::api::vTensor& v_ten);
 
 void record_scalar_add_buffer(
-    api::Context* context,
-    api::vTensor& v_ten,
+    vkcompute::api::Context* context,
+    vkcompute::api::vTensor& v_ten,
     float offset);
 
 void record_reference_matmul(
-    api::Context* context,
-    api::vTensor& out,
-    api::vTensor& mat1,
-    api::vTensor& mat2);
+    vkcompute::api::Context* context,
+    vkcompute::api::vTensor& out,
+    vkcompute::api::vTensor& mat1,
+    vkcompute::api::vTensor& mat2);
 
 void record_matmul_texture3d(
-    api::Context* context,
-    api::vTensor& out,
-    api::vTensor& mat1,
-    api::vTensor& mat2);
+    vkcompute::api::Context* context,
+    vkcompute::api::vTensor& out,
+    vkcompute::api::vTensor& mat1,
+    vkcompute::api::vTensor& mat2);
 
 //
 // Input & Output Utilities
 //
 
-inline void
-fill_staging(api::StagingBuffer& staging, float val, int numel = -1) {
+inline void fill_staging(
+    vkcompute::api::StagingBuffer& staging,
+    float val,
+    int numel = -1) {
   if (numel < 0) {
     numel = staging.numel();
   }
@@ -141,9 +147,9 @@ fill_staging(api::StagingBuffer& staging, float val, int numel = -1) {
   staging.copy_from(data.data(), sizeof(float) * numel);
 }
 
-void fill_vtensor(api::vTensor& vten, std::vector<float>& data);
+void fill_vtensor(vkcompute::api::vTensor& vten, std::vector<float>& data);
 
-void fill_vtensor(api::vTensor& vten, float val, bool iota = false);
+void fill_vtensor(vkcompute::api::vTensor& vten, float val, bool iota = false);
 
 std::vector<float> create_random_float_buffer(
     const size_t numel,
@@ -156,21 +162,23 @@ std::vector<uint8_t> create_random_uint8_buffer(
     const uint8_t max = 255);
 
 void fill_vtensor(
-    ComputeGraph& graph,
-    const IOValueRef idx,
+    vkcompute::ComputeGraph& graph,
+    const vkcompute::IOValueRef idx,
     float val,
     bool iota = false);
 
-void extract_vtensor(api::vTensor& vten, std::vector<float>& data);
+void extract_vtensor(vkcompute::api::vTensor& vten, std::vector<float>& data);
 
-inline std::vector<float> extract_vtensor(api::vTensor& vten) {
+inline std::vector<float> extract_vtensor(vkcompute::api::vTensor& vten) {
   std::vector<float> data_out(vten.staging_buffer_numel());
   extract_vtensor(vten, data_out);
   return data_out;
 }
 
-inline void
-check_staging_buffer(api::StagingBuffer& staging, float val, int numel = -1) {
+inline void check_staging_buffer(
+    vkcompute::api::StagingBuffer& staging,
+    float val,
+    int numel = -1) {
   if (numel < 0) {
     numel = staging.numel();
   }
@@ -183,21 +191,21 @@ check_staging_buffer(api::StagingBuffer& staging, float val, int numel = -1) {
 }
 
 inline int64_t get_buf_idx(
-    ComputeGraph& graph,
-    IOValueRef ref,
+    vkcompute::ComputeGraph& graph,
+    vkcompute::IOValueRef ref,
     const std::vector<int64_t>& tensor_coor) {
-  vTensorPtr vten_ptr = graph.get_tensor(ref.value);
+  vkcompute::vTensorPtr vten_ptr = graph.get_tensor(ref.value);
 
   const std::vector<int64_t>& sizes = vten_ptr->sizes();
 
-  int64_t c = dim_at<kChannel4D>(sizes);
-  int64_t h = dim_at<kHeight4D>(sizes);
-  int64_t w = dim_at<kWidth4D>(sizes);
+  int64_t c = vkcompute::dim_at<vkcompute::kChannel4D>(sizes);
+  int64_t h = vkcompute::dim_at<vkcompute::kHeight4D>(sizes);
+  int64_t w = vkcompute::dim_at<vkcompute::kWidth4D>(sizes);
 
-  int64_t ni = dim_at<kBatch4D>(tensor_coor);
-  int64_t ci = dim_at<kChannel4D>(tensor_coor);
-  int64_t hi = dim_at<kHeight4D>(tensor_coor);
-  int64_t wi = dim_at<kWidth4D>(tensor_coor);
+  int64_t ni = vkcompute::dim_at<vkcompute::kBatch4D>(tensor_coor);
+  int64_t ci = vkcompute::dim_at<vkcompute::kChannel4D>(tensor_coor);
+  int64_t hi = vkcompute::dim_at<vkcompute::kHeight4D>(tensor_coor);
+  int64_t wi = vkcompute::dim_at<vkcompute::kWidth4D>(tensor_coor);
 
   return (ni * c * h * w + ci * h * w + hi * w + wi);
 }
@@ -208,7 +216,8 @@ inline int64_t get_buf_idx(

 void submit_to_gpu();
 
-vkapi::Allocation allocate_memory_for(const api::vTensor& vten);
+vkcompute::vkapi::Allocation allocate_memory_for(
+    const vkcompute::api::vTensor& vten);
 
 VmaTotalStatistics get_vma_stats();
 
@@ -219,7 +228,7 @@ size_t get_vma_allocation_count();
 //
 
 void execute_graph_and_check_output(
-    ComputeGraph& graph,
+    vkcompute::ComputeGraph& graph,
     std::vector<float> input_vals,
     std::vector<float> expected_outputs);
 
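One subtlety worth noting about the hunks above: the macro bodies intentionally keep some unqualified names (`vkapi::kFloat`, `utils::StorageType::TEXTURE_3D`, and so on). This still compiles because macro expansion is textual: name lookup happens at the expansion site, which in this codebase is a .cpp file that now has `using namespace vkcompute;` in scope. A small self-contained sketch of that lookup behavior (hypothetical names, not from this repository):

```cpp
#include <iostream>

namespace outer {
namespace inner {
inline constexpr int value = 42;
}
} // namespace outer

// Header-style macro: `inner::value` is not fully qualified here, and
// there is no using-directive at the definition site.
#define READ_VALUE() (inner::value)

// Expansion site: the using-directive supplies the missing qualification.
using namespace outer;

int main() {
  std::cout << READ_VALUE() << '\n'; // expands to (inner::value); prints 42
  return 0;
}
```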
1 change: 1 addition & 0 deletions backends/vulkan/test/vulkan_compute_api_test.cpp
@@ -25,6 +25,7 @@

 #include <executorch/backends/vulkan/test/utils/test_utils.h>
 
+using namespace vkcompute;
 using namespace vkcompute::api;
 
 std::vector<float>
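With the directives now living in the test .cpp, the tests keep their unqualified style while the shared header stays clean. A hypothetical usage sketch in the style of this test file (the macro and function names mirror the declarations in test_utils.h above; the body itself is illustrative, not code from this diff):

```cpp
#include <executorch/backends/vulkan/test/utils/test_utils.h>

using namespace vkcompute;
using namespace vkcompute::api;

void example_roundtrip() {
  std::vector<int64_t> sizes = {2, 3, 4};

  // The macro expands here; its unqualified vkapi::kFloat and
  // utils::StorageType::TEXTURE_3D resolve through the using-directives
  // above. No trailing semicolon: the macro body already ends in one.
  api::vTensor tensor = CREATE_FLOAT_TEXTURE(sizes, /*allocate_memory=*/true)

  fill_vtensor(tensor, 1.0f);                       // declared in test_utils.h
  std::vector<float> out = extract_vtensor(tensor); // declared in test_utils.h
  (void)out;
}
```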
