7 changes: 6 additions & 1 deletion cmake/cuda.cmake
@@ -83,7 +83,12 @@ if(CUDA_FOUND)

add_library(cuda INTERFACE)
target_include_directories(cuda SYSTEM INTERFACE "${CUDA_INCLUDE_DIRS}")

target_link_libraries(cuda INTERFACE
${CUDADRV_LIBRARIES}
${CUDA_LIBRARIES}
${CUDA_CUBLAS_LIBRARIES}
${CUDA_curand_LIBRARY})

else()
message( FATAL_ERROR "CUDA package not found -> specify search path via CUDA_ROOT variable")
endif()
1 change: 1 addition & 0 deletions lib/CMakeLists.txt
@@ -1,3 +1,4 @@

add_subdirectory(pcg)
add_subdirectory(compiler)
add_subdirectory(runtime)
2 changes: 2 additions & 0 deletions lib/kernels/CMakeLists.txt
@@ -37,3 +37,5 @@ set_target_properties(
PROPERTIES
CUDA_STANDARD 17
)

add_subdirectory(test)
2 changes: 2 additions & 0 deletions lib/kernels/include/kernels/array_shape.h
@@ -37,6 +37,8 @@ struct ArrayShape {

std::optional<std::size_t> at_maybe(std::size_t) const;

bool operator==(ArrayShape const &other) const; // enables shape comparison in tests

ArrayShape reversed_dim_order() const;
ArrayShape sub_shape(std::optional<legion_dim_t> start,
std::optional<legion_dim_t> end);
4 changes: 3 additions & 1 deletion lib/kernels/include/kernels/profiling.h
@@ -7,7 +7,7 @@

namespace FlexFlow {

-struct ProfilingSettings : public use_visitable_cmp<ProfilingSettings> {
+struct ProfilingSettings {
public:
ProfilingSettings() = delete;
ProfilingSettings(int warmup_iters, int measure_iters);
@@ -17,6 +17,8 @@ struct ProfilingSettings : public use_visitable_cmp<ProfilingSettings> {
int measure_iters;
};

FF_VISITABLE_STRUCT(ProfilingSettings, warmup_iters, measure_iters);

template <typename F, typename... Ts>
optional<float>
profiling_wrapper(F const &f, bool enable_profiling, Ts &&...ts) {
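For context, a minimal sketch of how profiling_wrapper might be invoked, inferred from the signature above. The callable, its buffer argument, and the assumption that the wrapper forwards ts... to f and returns elapsed milliseconds only when enable_profiling is true are all illustrative, not taken from this PR:

#include "kernels/profiling.h"

using namespace FlexFlow;

// Hypothetical call site; the forwarding semantics (f receives ts...) and
// the milliseconds return value are assumptions based on the signature alone.
void profile_example(float *device_buffer) {
  auto fill = [](float *buf) {
    // a CUDA kernel launch writing to buf would go here
  };
  optional<float> elapsed_ms =
      profiling_wrapper(fill, /*enable_profiling=*/true, device_buffer);
  if (elapsed_ms.has_value()) {
    // use *elapsed_ms, e.g. log the measured time
  }
}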
62 changes: 62 additions & 0 deletions lib/kernels/src/array_shape.cc
@@ -7,7 +7,69 @@ ArrayShape::ArrayShape(size_t *_dims, size_t num_dims)
: dims(_dims, _dims + num_dims) {}

std::size_t ArrayShape::get_volume() const {
return num_elements();
}

std::size_t ArrayShape::get_dim() const {
return num_dims();
}

std::size_t ArrayShape::num_elements() const {
return product(this->dims);
}

std::size_t ArrayShape::num_dims() const {
return this->dims.size();
}

std::size_t ArrayShape::operator[](legion_dim_t idx) const {
return dims.at(idx);
}

std::size_t ArrayShape::at(legion_dim_t idx) const {
return dims.at(idx);
}

legion_dim_t ArrayShape::last_idx() const {
return legion_dim_t(dims.size() - 1);
}

legion_dim_t ArrayShape::neg_idx(int idx) const {
assert(idx < 0 && "Idx should be negative for negative indexing");
return legion_dim_t(dims.size() + idx);
}

optional<std::size_t> ArrayShape::at_maybe(std::size_t idx) const {
if (idx < dims.size()) {
return dims[legion_dim_t(idx)];
} else {
return {};
}
}

ArrayShape ArrayShape::reversed_dim_order() const {
std::vector<std::size_t> dims_reversed(dims.rbegin(), dims.rend());
return ArrayShape(dims_reversed);
}

ArrayShape ArrayShape::sub_shape(optional<legion_dim_t> start,
optional<legion_dim_t> end) {
// Half-open range [start, end); absent bounds default to the full shape.
size_t s = start.has_value() ? start.value().value() : 0;
size_t e = end.has_value() ? end.value().value() : dims.size();
std::vector<std::size_t> sub_dims(dims.begin() + s, dims.begin() + e);
return ArrayShape(sub_dims);
}

bool ArrayShape::operator==(ArrayShape const &other) const {
if (this->dims.size() != other.dims.size()) {
return false;
}

return this->dims == other.dims;
}

size_t get_volume(ArrayShape const &shape) {
return shape.get_volume();
}

} // namespace FlexFlow
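As a quick reference, an illustrative snippet (not part of the diff) exercising the new ArrayShape helpers; it mirrors the semantics visible above: sub_shape takes a half-open [start, end) range, and at_maybe returns an empty optional out of range:

#include <cassert>
#include <vector>

#include "kernels/array_shape.h"
#include "kernels/legion_dim.h"

using namespace FlexFlow;

void array_shape_example() {
  ArrayShape shape(std::vector<std::size_t>{2, 3, 4});

  // Half-open range: keeps dims 1 and 2, i.e. {3, 4}.
  ArrayShape tail = shape.sub_shape(legion_dim_t(1), legion_dim_t(3));
  assert(tail.num_elements() == 12);

  // {2, 3, 4} -> {4, 3, 2}.
  ArrayShape rev = shape.reversed_dim_order();
  assert(rev.at(legion_dim_t(0)) == 4);

  // Out-of-range lookup yields an empty optional rather than throwing.
  assert(!shape.at_maybe(5).has_value());
}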
5 changes: 3 additions & 2 deletions lib/kernels/src/perf_metrics.cc
@@ -15,8 +15,9 @@ PerfMetrics::PerfMetrics(int _train_all,
double _start_time_micro,
double _current_time_micro)
: train_all(_train_all), train_correct(_train_correct), cce_loss(_cce_loss),
-mse_loss(_mse_loss), rmse_loss(_rmse_loss), mae_loss(_mae_loss),
-start_time(_start_time_micro), current_time(_current_time_micro) {}
+sparse_cce_loss(_sparse_cce_loss), mse_loss(_mse_loss),
+rmse_loss(_rmse_loss), mae_loss(_mae_loss), start_time(_start_time_micro),
+current_time(_current_time_micro) {}

float get_throughput(PerfMetrics const &m) {
return m.train_all / (m.current_time - m.start_time);
27 changes: 27 additions & 0 deletions lib/kernels/test/CMakeLists.txt
@@ -0,0 +1,27 @@
set(project_target kernel-test)
project(${project_target} LANGUAGES CUDA)

file(GLOB_RECURSE SRC
CONFIGURE_DEPENDS
LIST_DIRECTORIES False
src/*.cc)

add_executable(
${project_target}
${SRC})

target_link_libraries(
${project_target}
kernels
cuda
rapidcheck
doctest::doctest)

set_target_properties(
${project_target}
PROPERTIES
CUDA_STANDARD 17
)

define_ff_vars(${project_target})
doctest_discover_tests(${project_target})
1 change: 1 addition & 0 deletions lib/kernels/test/src/doctest.h
@@ -0,0 +1 @@
#include "doctest/doctest.h"
2 changes: 2 additions & 0 deletions lib/kernels/test/src/main.cc
@@ -0,0 +1,2 @@
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest/doctest.h"
84 changes: 84 additions & 0 deletions lib/kernels/test/src/test_accessor.cc
@@ -0,0 +1,84 @@

#include "doctest.h"
#include "kernels/accessor.h"

using namespace FlexFlow;

TEST_CASE("Test GenericTensorAccessorW") {
float dataW = 3.14f;
GenericTensorAccessorW accessorW{
DataType::FLOAT, ArrayShape(std::vector<std::size_t>{}), &dataW};

// Test get method
CHECK(*accessorW.get<DataType::FLOAT>() == doctest::Approx(3.14f));

// Test specific type get ptr methods
CHECK(get_float_ptr(accessorW) != nullptr);
CHECK(*get_float_ptr(accessorW) == doctest::Approx(3.14f));

// Check runtime error for invalid access
CHECK_THROWS_WITH(accessorW.get<DataType::DOUBLE>(),
"Invalid access data type (FLOAT != DOUBLE)");
}

TEST_CASE("Test GenericTensorAccessorR") {
float dataR = 7.89f;
GenericTensorAccessorR accessorR{
DataType::FLOAT, ArrayShape(std::vector<std::size_t>{}), &dataR};
// Test get method
CHECK(*accessorR.get<DataType::FLOAT>() == doctest::Approx(7.89f));

// Test specific type get ptr methods
CHECK(get_float_ptr(accessorR) != nullptr);
CHECK(*get_float_ptr(accessorR) == doctest::Approx(7.89f));

// Check runtime error for invalid access
CHECK_THROWS_WITH(accessorR.get<DataType::DOUBLE>(),
"Invalid access data type (FLOAT != DOUBLE)");
}

TEST_CASE("Test get_int32_ptr for GenericTensorAccessorW") {
int32_t dataW = 12345;
GenericTensorAccessorW accessorW{
DataType::INT32, ArrayShape(std::vector<std::size_t>{}), &dataW};

// Test get_int32_ptr method
CHECK(get_int32_ptr(accessorW) != nullptr);
CHECK(*get_int32_ptr(accessorW) == 12345);
}

TEST_CASE("Test get_int64_ptr for GenericTensorAccessorW") {
int64_t dataW = 1234567890LL;
GenericTensorAccessorW accessorW{
DataType::INT64, ArrayShape(std::vector<std::size_t>{}), &dataW};
// Test get_int64_ptr method
CHECK(get_int64_ptr(accessorW) != nullptr);
CHECK(*get_int64_ptr(accessorW) == 1234567890LL);
}

TEST_CASE("Test get_float_ptr for GenericTensorAccessorW") {
float dataW = 3.14f;
GenericTensorAccessorW accessorW{
DataType::FLOAT, ArrayShape(std::vector<std::size_t>{}), &dataW};
// Test get_float_ptr method
CHECK(get_float_ptr(accessorW) != nullptr);
CHECK(*get_float_ptr(accessorW) == doctest::Approx(3.14f));
}

TEST_CASE("Test get_double_ptr for GenericTensorAccessorW") {
double dataW = 6.28;
GenericTensorAccessorW accessorW{
DataType::DOUBLE, ArrayShape(std::vector<std::size_t>{}), &dataW};
// Test get_double_ptr method
CHECK(get_double_ptr(accessorW) != nullptr);
CHECK(*get_double_ptr(accessorW) == doctest::Approx(6.28));
}

TEST_CASE("Test get_int32_ptr for GenericTensorAccessorR") {
int32_t dataR = 67890;
GenericTensorAccessorR accessorR{
DataType::INT32, ArrayShape(std::vector<std::size_t>{}), &dataR};
// Test get_int32_ptr method
CHECK(get_int32_ptr(accessorR) != nullptr);
CHECK(*get_int32_ptr(accessorR) == 67890);
}
45 changes: 45 additions & 0 deletions lib/kernels/test/src/test_array_shape.cc
@@ -0,0 +1,45 @@
#include "doctest.h"
#include "kernels/array_shape.h" // Assuming this is where your ArrayShape is
#include "kernels/legion_dim.h"

using namespace FlexFlow;

TEST_CASE("ArrayShape Initialization and Basic Functions") {
std::vector<std::size_t> dims = {2, 3, 4};
ArrayShape shape(dims);
CHECK(shape.get_dim() == 3);
CHECK(shape.get_volume() == 24);
CHECK(shape.num_elements() == 24);
CHECK(shape.num_dims() == 3);
CHECK(shape[legion_dim_t(1)] == 3);
CHECK(shape.at(legion_dim_t(2)) == 4);
}

TEST_CASE("Negative Indices and Optional Indexing") {
std::vector<std::size_t> dims = {2, 3, 4};
ArrayShape shape(dims);

CHECK(shape.neg_idx(-1) == legion_dim_t(2));
CHECK(shape.neg_idx(-3) == legion_dim_t(0));

CHECK(shape.at_maybe(0) == 2);
CHECK(shape.at_maybe(2) == 4);
CHECK(!shape.at_maybe(5).has_value());
}

TEST_CASE("Reversed Dim Order and Sub-shape") {
std::vector<std::size_t> dims = {2, 3, 4};
ArrayShape shape(dims);

ArrayShape reversed = shape.reversed_dim_order();
CHECK(reversed[legion_dim_t(0)] == 4);
CHECK(reversed[legion_dim_t(1)] == 3);
CHECK(reversed[legion_dim_t(2)] == 2);

ArrayShape sub = shape.sub_shape(legion_dim_t(0), legion_dim_t(2));
CHECK(sub.get_dim() == 2);
CHECK(sub[legion_dim_t(0)] == 2);
CHECK(sub[legion_dim_t(1)] == 3);
}
31 changes: 31 additions & 0 deletions lib/kernels/test/src/test_datatype_dispatch.cc
@@ -0,0 +1,31 @@
#include "doctest.h"
#include "kernels/datatype_dispatch.h"

using namespace FlexFlow;

template <DataType DT>
struct Function1 {
int operator()(int value) const {
if (DT == DataType::FLOAT) {
return value + 1;
}
if (DT == DataType::DOUBLE) {
return value + 2;
}
return 0;
}
};

TEST_CASE("Testing dispatch function") {
int value = 10;
int result = dispatch<Function1>(DataType::FLOAT, value);
CHECK(result == 11);
}

// test DataTypeDispatch1
TEST_CASE("Testing DataTypeDispatch1") {
DataTypeDispatch1<Function1> dispatcher;
int value = 10;
int result = dispatcher(DataType::FLOAT, value);
CHECK(result == 11);
}
52 changes: 52 additions & 0 deletions lib/kernels/test/src/test_legion_dim.cc
@@ -0,0 +1,52 @@
#include "doctest.h"
#include "kernels/legion_dim.h"

using namespace FlexFlow;

TEST_CASE("Testing DimOrdered") {
SUBCASE("constructor method") {
DimOrdered<legion_dim_t, int> fromInitList = {1, 2, 3};
CHECK(fromInitList.size() == 3);
std::vector<int> vec = {4, 5, 6};
DimOrdered<legion_dim_t, int> fromVector(vec);
CHECK(fromVector.size() == 3);
}

SUBCASE("at") {
DimOrdered<legion_dim_t, int> dimOrder = {1, 2, 3};
CHECK(dimOrder[legion_dim_t(0)] == 1);
CHECK(dimOrder[legion_dim_t(1)] == 2);
CHECK(dimOrder[legion_dim_t(2)] == 3);
}

SUBCASE("comparsion") {
DimOrdered<legion_dim_t, int> order1 = {1, 2, 3};
DimOrdered<legion_dim_t, int> order2 = {1, 2, 4};
DimOrdered<legion_dim_t, int> order3 = {1, 2, 3};

CHECK(order1 != order2);
CHECK(order1 == order3);
}

SUBCASE("iterator") {
DimOrdered<legion_dim_t, int> dimOrder = {1, 2, 3};
int sum = 0;
for (int value : dimOrder) {
sum += value;
}
CHECK(sum == 6);
}
}

TEST_CASE("Testing LegionTensorDims") {

SUBCASE("LegionTensorDims Basic Operation") {
LegionTensorDims tensorDims = {100, 200};

CHECK(tensorDims[legion_dim_t(0)] == 100);
CHECK(tensorDims[legion_dim_t(1)] == 200);
}
}