diff --git a/cpp/include/cudf/binaryop.hpp b/cpp/include/cudf/binaryop.hpp index 177fd904b0b..867f630141b 100644 --- a/cpp/include/cudf/binaryop.hpp +++ b/cpp/include/cudf/binaryop.hpp @@ -214,5 +214,47 @@ cudf::data_type binary_operation_fixed_point_output_type(binary_operator op, cudf::data_type const& lhs, cudf::data_type const& rhs); +namespace binops { + +/** + * @brief Computes output valid mask for op between a column and a scalar + * + * @param col Column to compute the valid mask from + * @param s Scalar to compute the valid mask from + * @param stream CUDA stream used for device memory operations and kernel launches + * @param mr Device memory resource used to allocate the returned valid mask + * @return Computed validity mask + */ +std::pair<rmm::device_buffer, size_type> scalar_col_valid_mask_and( + column_view const& col, + scalar const& s, + rmm::cuda_stream_view stream = cudf::default_stream_value, + rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()); + +namespace compiled { +namespace detail { + +/** + * @brief struct binary operation using `NaN` aware sorting physical element comparators + * + * @param out mutable view of output column + * @param lhs view of left operand column + * @param rhs view of right operand column + * @param is_lhs_scalar true if @p lhs is a single element column representing a scalar + * @param is_rhs_scalar true if @p rhs is a single element column representing a scalar + * @param op binary operator identifier + * @param stream CUDA stream used for device memory operations + */ +void apply_sorting_struct_binary_op(mutable_column_view& out, + column_view const& lhs, + column_view const& rhs, + bool is_lhs_scalar, + bool is_rhs_scalar, + binary_operator op, + rmm::cuda_stream_view stream = cudf::default_stream_value); +} // namespace detail +} // namespace compiled +} // namespace binops + /** @} */ // end of group } // namespace cudf diff --git a/cpp/src/binaryop/binaryop.cpp b/cpp/src/binaryop/binaryop.cpp index 
e6ed422e5ea..636a844a141 100644 --- a/cpp/src/binaryop/binaryop.cpp +++ b/cpp/src/binaryop/binaryop.cpp @@ -52,19 +52,21 @@ namespace binops { /** * @brief Computes output valid mask for op between a column and a scalar */ -rmm::device_buffer scalar_col_valid_mask_and(column_view const& col, - scalar const& s, - rmm::cuda_stream_view stream, - rmm::mr::device_memory_resource* mr) +std::pair<rmm::device_buffer, size_type> scalar_col_valid_mask_and( + column_view const& col, + scalar const& s, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) { - if (col.is_empty()) return rmm::device_buffer{0, stream, mr}; + if (col.is_empty()) return std::pair(rmm::device_buffer{0, stream, mr}, 0); if (not s.is_valid(stream)) { - return cudf::detail::create_null_mask(col.size(), mask_state::ALL_NULL, stream, mr); + return std::pair(cudf::detail::create_null_mask(col.size(), mask_state::ALL_NULL, stream, mr), + col.size()); } else if (s.is_valid(stream) and col.nullable()) { - return cudf::detail::copy_bitmask(col, stream, mr); + return std::pair(cudf::detail::copy_bitmask(col, stream, mr), col.null_count()); } else { - return rmm::device_buffer{0, stream, mr}; + return std::pair(rmm::device_buffer{0, stream, mr}, 0); } } @@ -253,9 +255,9 @@ std::unique_ptr<column> make_fixed_width_column_for_output(scalar const& lhs, if (binops::is_null_dependent(op)) { return make_fixed_width_column(output_type, rhs.size(), mask_state::ALL_VALID, stream, mr); } else { - auto new_mask = binops::scalar_col_valid_mask_and(rhs, lhs, stream, mr); + auto [new_mask, new_null_count] = binops::scalar_col_valid_mask_and(rhs, lhs, stream, mr); return make_fixed_width_column( - output_type, rhs.size(), std::move(new_mask), cudf::UNKNOWN_NULL_COUNT, stream, mr); + output_type, rhs.size(), std::move(new_mask), new_null_count, stream, mr); } }; @@ -280,9 +282,9 @@ std::unique_ptr<column> make_fixed_width_column_for_output(column_view const& lh
mask_state::ALL_VALID, stream, mr); } else { - auto new_mask = binops::scalar_col_valid_mask_and(lhs, rhs, stream, mr); + auto [new_mask, new_null_count] = binops::scalar_col_valid_mask_and(lhs, rhs, stream, mr); return make_fixed_width_column( - output_type, lhs.size(), std::move(new_mask), cudf::UNKNOWN_NULL_COUNT, stream, mr); + output_type, lhs.size(), std::move(new_mask), new_null_count, stream, mr); } }; diff --git a/cpp/src/binaryop/compiled/binary_ops.cu b/cpp/src/binaryop/compiled/binary_ops.cu index ee9fe840fd6..f16d1b99219 100644 --- a/cpp/src/binaryop/compiled/binary_ops.cu +++ b/cpp/src/binaryop/compiled/binary_ops.cu @@ -16,9 +16,11 @@ #include "binary_ops.hpp" #include "operation.cuh" +#include "struct_binary_ops.cuh" #include #include +#include #include #include @@ -44,7 +46,7 @@ namespace { */ struct scalar_as_column_view { using return_type = typename std::pair>; - template ())>* = nullptr> + template ())> return_type operator()(scalar const& s, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) { auto& h_scalar_type_view = static_cast&>(const_cast(s)); @@ -52,7 +54,7 @@ struct scalar_as_column_view { column_view(s.type(), 1, h_scalar_type_view.data(), (bitmask_type const*)s.validity_data()); return std::pair{col_v, std::unique_ptr(nullptr)}; } - template ())>* = nullptr> + template ())> return_type operator()(scalar const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) { CUDF_FAIL("Unsupported type"); @@ -85,6 +87,15 @@ scalar_as_column_view::return_type scalar_as_column_view::operator() +scalar_as_column_view::return_type scalar_as_column_view::operator()( + scalar const& s, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) +{ + auto col = make_column_from_scalar(s, 1, stream, mr); + return std::pair{col->view(), std::move(col)}; +} + /** * @brief Converts scalar to column_view with single element. 
* @@ -375,6 +386,79 @@ void binary_operation(mutable_column_view& out, auto [rhsv, aux] = scalar_to_column_view(rhs, stream); operator_dispatcher(out, lhs, rhsv, false, true, op, stream); } + +namespace detail { +void apply_sorting_struct_binary_op(mutable_column_view& out, + column_view const& lhs, + column_view const& rhs, + bool is_lhs_scalar, + bool is_rhs_scalar, + binary_operator op, + rmm::cuda_stream_view stream) +{ + CUDF_EXPECTS(lhs.type().id() == type_id::STRUCT && rhs.type().id() == type_id::STRUCT, + "Both columns must be struct columns"); + CUDF_EXPECTS(!cudf::structs::detail::is_or_has_nested_lists(lhs) and + !cudf::structs::detail::is_or_has_nested_lists(rhs), + "List type is not supported"); + // Struct child column type and structure mismatches are caught within the two_table_comparator + switch (op) { + case binary_operator::EQUAL: [[fallthrough]]; + case binary_operator::NOT_EQUAL: + detail::apply_struct_equality_op( + out, + lhs, + rhs, + is_lhs_scalar, + is_rhs_scalar, + op, + cudf::experimental::row::equality::nan_equal_physical_equality_comparator{}, + stream); + break; + case binary_operator::LESS: + detail::apply_struct_binary_op( + out, + lhs, + rhs, + is_lhs_scalar, + is_rhs_scalar, + cudf::experimental::row::lexicographic::sorting_physical_element_comparator{}, + stream); + break; + case binary_operator::GREATER: + detail::apply_struct_binary_op( + out, + lhs, + rhs, + is_lhs_scalar, + is_rhs_scalar, + cudf::experimental::row::lexicographic::sorting_physical_element_comparator{}, + stream); + break; + case binary_operator::LESS_EQUAL: + detail::apply_struct_binary_op( + out, + lhs, + rhs, + is_lhs_scalar, + is_rhs_scalar, + cudf::experimental::row::lexicographic::sorting_physical_element_comparator{}, + stream); + break; + case binary_operator::GREATER_EQUAL: + detail::apply_struct_binary_op( + out, + lhs, + rhs, + is_lhs_scalar, + is_rhs_scalar, + cudf::experimental::row::lexicographic::sorting_physical_element_comparator{}, + stream); 
+ break; + default: CUDF_FAIL("Unsupported operator for structs"); + } +} +} // namespace detail } // namespace compiled } // namespace binops } // namespace cudf diff --git a/cpp/src/binaryop/compiled/binary_ops.cuh b/cpp/src/binaryop/compiled/binary_ops.cuh index d88d2be2499..9b1163c1169 100644 --- a/cpp/src/binaryop/compiled/binary_ops.cuh +++ b/cpp/src/binaryop/compiled/binary_ops.cuh @@ -272,10 +272,7 @@ void for_each(rmm::cuda_stream_view stream, cudf::size_type size, Functor f) const int grid_size = util::div_rounding_up_safe(size, 2 * block_size); for_each_kernel<<>>(size, std::forward(f)); } -namespace detail { -template -inline constexpr bool is_any_v = std::disjunction...>::value; -} + template void apply_binary_op(mutable_column_view& out, column_view const& lhs, diff --git a/cpp/src/binaryop/compiled/struct_binary_ops.cuh b/cpp/src/binaryop/compiled/struct_binary_ops.cuh new file mode 100644 index 00000000000..b9d9477d1a4 --- /dev/null +++ b/cpp/src/binaryop/compiled/struct_binary_ops.cuh @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "binary_ops.hpp" +#include "operation.cuh" + +#include +#include +#include +#include +#include + +#include +#include + +namespace cudf::binops::compiled::detail { +template +inline constexpr bool is_any_v = std::disjunction...>::value; + +template +struct device_comparison_functor { + // Explicit constructor definition required to avoid a "no instance of constructor" compilation + // error + device_comparison_functor(OptionalIterator const optional_iter, + bool const is_lhs_scalar, + bool const is_rhs_scalar, + DeviceComparator const& comparator) + : _optional_iter(optional_iter), + _is_lhs_scalar(is_lhs_scalar), + _is_rhs_scalar(is_rhs_scalar), + _comparator(comparator) + { + } + + bool __device__ operator()(size_type i) + { + return _optional_iter[i].has_value() && + _comparator(cudf::experimental::row::lhs_index_type{_is_lhs_scalar ? 0 : i}, + cudf::experimental::row::rhs_index_type{_is_rhs_scalar ? 0 : i}); + } + + OptionalIterator const _optional_iter; + bool const _is_lhs_scalar; + bool const _is_rhs_scalar; + DeviceComparator const _comparator; +}; + +template +void apply_struct_binary_op(mutable_column_view& out, + column_view const& lhs, + column_view const& rhs, + bool is_lhs_scalar, + bool is_rhs_scalar, + PhysicalElementComparator comparator = {}, + rmm::cuda_stream_view stream = cudf::default_stream_value) +{ + auto const compare_orders = std::vector( + lhs.size(), + is_any_v ? 
order::DESCENDING + : order::ASCENDING); + auto const tlhs = table_view{{lhs}}; + auto const trhs = table_view{{rhs}}; + auto const table_comparator = cudf::experimental::row::lexicographic::two_table_comparator{ + tlhs, trhs, compare_orders, {}, stream}; + auto outd = column_device_view::create(out, stream); + auto optional_iter = + cudf::detail::make_optional_iterator(*outd, nullate::DYNAMIC{out.has_nulls()}); + auto const comparator_nulls = nullate::DYNAMIC{has_nested_nulls(tlhs) || has_nested_nulls(trhs)}; + + auto tabulate_device_operator = [&](auto device_comparator) { + thrust::tabulate( + rmm::exec_policy(stream), + out.begin(), + out.end(), + device_comparison_functor{optional_iter, is_lhs_scalar, is_rhs_scalar, device_comparator}); + }; + is_any_v + ? tabulate_device_operator(table_comparator.less_equivalent(comparator_nulls, comparator)) + : tabulate_device_operator(table_comparator.less(comparator_nulls, comparator)); +} + +template +void apply_struct_equality_op(mutable_column_view& out, + column_view const& lhs, + column_view const& rhs, + bool is_lhs_scalar, + bool is_rhs_scalar, + binary_operator op, + PhysicalEqualityComparator comparator = {}, + rmm::cuda_stream_view stream = cudf::default_stream_value) +{ + CUDF_EXPECTS(op == binary_operator::EQUAL || op == binary_operator::NOT_EQUAL, + "Unsupported operator for these types"); + + auto tlhs = table_view{{lhs}}; + auto trhs = table_view{{rhs}}; + auto table_comparator = + cudf::experimental::row::equality::two_table_comparator{tlhs, trhs, stream}; + auto device_comparator = + table_comparator.equal_to(nullate::DYNAMIC{has_nested_nulls(tlhs) || has_nested_nulls(trhs)}, + null_equality::EQUAL, + comparator); + + auto outd = column_device_view::create(out, stream); + auto optional_iter = + cudf::detail::make_optional_iterator(*outd, nullate::DYNAMIC{out.has_nulls()}); + thrust::tabulate(rmm::exec_policy(stream), + out.begin(), + out.end(), + [optional_iter, + is_lhs_scalar, + is_rhs_scalar, + 
preserve_output = (op != binary_operator::NOT_EQUAL), + device_comparator] __device__(size_type i) { + auto lhs = cudf::experimental::row::lhs_index_type{is_lhs_scalar ? 0 : i}; + auto rhs = cudf::experimental::row::rhs_index_type{is_rhs_scalar ? 0 : i}; + return optional_iter[i].has_value() and + (device_comparator(lhs, rhs) == preserve_output); + }); +} +} // namespace cudf::binops::compiled::detail diff --git a/java/src/main/native/include/maps_column_view.hpp b/java/src/main/native/include/maps_column_view.hpp index a7733b5da69..b9b60f4e3b2 100644 --- a/java/src/main/native/include/maps_column_view.hpp +++ b/java/src/main/native/include/maps_column_view.hpp @@ -141,7 +141,7 @@ class maps_column_view { */ std::unique_ptr - contains(column_view const &key, rmm::cuda_stream_view stream = rmm::cuda_stream_default, + contains(column_view const &key, rmm::cuda_stream_view stream = cudf::default_stream_value, rmm::mr::device_memory_resource *mr = rmm::mr::get_current_device_resource()) const; private: diff --git a/java/src/main/native/src/ColumnViewJni.cpp b/java/src/main/native/src/ColumnViewJni.cpp index 4659d85fa17..eeff2e1b0e2 100644 --- a/java/src/main/native/src/ColumnViewJni.cpp +++ b/java/src/main/native/src/ColumnViewJni.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -1290,9 +1291,18 @@ JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_binaryOpVV(JNIEnv *env, j cudf::jni::auto_set_device(env); auto lhs = reinterpret_cast(lhs_view); auto rhs = reinterpret_cast(rhs_view); - cudf::data_type n_data_type = cudf::jni::make_data_type(out_dtype, scale); cudf::binary_operator op = static_cast(int_op); + + if (lhs->type().id() == cudf::type_id::STRUCT) { + auto [new_mask, null_count] = cudf::bitmask_and(cudf::table_view{{*lhs, *rhs}}); + auto out = make_fixed_width_column(n_data_type, lhs->size(), std::move(new_mask), null_count); + auto out_view = out->mutable_view(); + 
cudf::binops::compiled::detail::apply_sorting_struct_binary_op(out_view, *lhs, *rhs, false, + false, op); + return release_as_jlong(out); + } + return release_as_jlong(cudf::binary_operation(*lhs, *rhs, op, n_data_type)); } CATCH_STD(env, 0); @@ -1321,8 +1331,19 @@ JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_ColumnView_binaryOpVS(JNIEnv *env, j auto lhs = reinterpret_cast(lhs_view); cudf::scalar *rhs = reinterpret_cast(rhs_ptr); cudf::data_type n_data_type = cudf::jni::make_data_type(out_dtype, scale); - cudf::binary_operator op = static_cast(int_op); + + if (lhs->type().id() == cudf::type_id::STRUCT) { + auto [new_mask, new_null_count] = cudf::binops::scalar_col_valid_mask_and(*lhs, *rhs); + auto out = + make_fixed_width_column(n_data_type, lhs->size(), std::move(new_mask), new_null_count); + auto rhsv = cudf::make_column_from_scalar(*rhs, 1); + auto out_view = out->mutable_view(); + cudf::binops::compiled::detail::apply_sorting_struct_binary_op(out_view, *lhs, rhsv->view(), + false, true, op); + return release_as_jlong(out); + } + return release_as_jlong(cudf::binary_operation(*lhs, *rhs, op, n_data_type)); } CATCH_STD(env, 0); diff --git a/java/src/main/native/src/ScalarJni.cpp b/java/src/main/native/src/ScalarJni.cpp index b00b066742a..9af3edd0356 100644 --- a/java/src/main/native/src/ScalarJni.cpp +++ b/java/src/main/native/src/ScalarJni.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,13 +15,17 @@ */ #include +#include #include #include #include +#include #include "cudf_jni_apis.hpp" #include "dtype_utils.hpp" +using cudf::jni::release_as_jlong; + extern "C" { JNIEXPORT void JNICALL Java_ai_rapids_cudf_Scalar_closeScalar(JNIEnv *env, jclass, @@ -496,10 +500,20 @@ JNIEXPORT jlong JNICALL Java_ai_rapids_cudf_Scalar_binaryOpSV(JNIEnv *env, jclas cudf::scalar *lhs = reinterpret_cast(lhs_ptr); auto rhs = reinterpret_cast(rhs_view); cudf::data_type n_data_type = cudf::jni::make_data_type(out_dtype, scale); - cudf::binary_operator op = static_cast(int_op); - std::unique_ptr result = cudf::binary_operation(*lhs, *rhs, op, n_data_type); - return reinterpret_cast(result.release()); + + if (lhs->type().id() == cudf::type_id::STRUCT) { + auto [new_mask, new_null_count] = cudf::binops::scalar_col_valid_mask_and(*rhs, *lhs); + auto out = + make_fixed_width_column(n_data_type, rhs->size(), std::move(new_mask), new_null_count); + auto lhs_col = cudf::make_column_from_scalar(*lhs, 1); + auto out_view = out->mutable_view(); + cudf::binops::compiled::detail::apply_sorting_struct_binary_op(out_view, lhs_col->view(), + *rhs, true, false, op); + return release_as_jlong(out); + } + + return release_as_jlong(cudf::binary_operation(*lhs, *rhs, op, n_data_type)); } CATCH_STD(env, 0); } diff --git a/java/src/test/java/ai/rapids/cudf/BinaryOpTest.java b/java/src/test/java/ai/rapids/cudf/BinaryOpTest.java index 862f3860d3d..35d1cb39324 100644 --- a/java/src/test/java/ai/rapids/cudf/BinaryOpTest.java +++ b/java/src/test/java/ai/rapids/cudf/BinaryOpTest.java @@ -91,12 +91,16 @@ interface CpuOpSV { } public static ColumnVector forEach(DType retType, ColumnVector lhs, ColumnVector rhs, CpuOpVV op) { + return forEach(retType, lhs, rhs, op, false); + } + + public static ColumnVector forEach(DType retType, ColumnVector lhs, ColumnVector rhs, CpuOpVV op, boolean evalNulls) { int len = (int)lhs.getRowCount(); try (HostColumnVector hostLHS = lhs.copyToHost(); HostColumnVector 
hostRHS = rhs.copyToHost(); Builder builder = HostColumnVector.builder(retType, len)) { for (int i = 0; i < len; i++) { - if (hostLHS.isNull(i) || hostRHS.isNull(i)) { + if (!evalNulls && (hostLHS.isNull(i) || hostRHS.isNull(i))) { builder.appendNull(); } else { op.computeNullSafe(builder, hostLHS, hostRHS, i); @@ -107,11 +111,15 @@ public static ColumnVector forEach(DType retType, ColumnVector lhs, ColumnVector } public static ColumnVector forEachS(DType retType, ColumnVector lhs, S rhs, CpuOpVS op) { + return forEachS(retType, lhs, rhs, op, false); + } + + public static ColumnVector forEachS(DType retType, ColumnVector lhs, S rhs, CpuOpVS op, boolean evalNulls) { int len = (int)lhs.getRowCount(); try (HostColumnVector hostLHS = lhs.copyToHost(); Builder builder = HostColumnVector.builder(retType, len)) { for (int i = 0; i < len; i++) { - if (hostLHS.isNull(i) || rhs == null) { + if (!evalNulls && (hostLHS.isNull(i) || rhs == null)) { builder.appendNull(); } else { op.computeNullSafe(builder, hostLHS, rhs, i); @@ -122,11 +130,15 @@ public static ColumnVector forEachS(DType retType, ColumnVector lhs, S rhs, } public static ColumnVector forEachS(DType retType, S lhs, ColumnVector rhs, CpuOpSV op) { + return forEachS(retType, lhs, rhs, op, false); + } + + public static ColumnVector forEachS(DType retType, S lhs, ColumnVector rhs, CpuOpSV op, boolean evalNulls) { int len = (int)rhs.getRowCount(); try (HostColumnVector hostRHS = rhs.copyToHost(); Builder builder = HostColumnVector.builder(retType, len)) { for (int i = 0; i < len; i++) { - if (hostRHS.isNull(i) || lhs == null) { + if (!evalNulls && (hostRHS.isNull(i) || lhs == null)) { builder.appendNull(); } else { op.computeNullSafe(builder, lhs, hostRHS, i); @@ -872,12 +884,16 @@ public void testStringEqualScalarNotPresent() { @Test public void testNotEqual() { - try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1); + try (ColumnVector icv1 = ColumnVector.fromBoxedInts(INTS_1); + ColumnVector icv2 = 
ColumnVector.fromBoxedInts(INTS_2); + ColumnVector structcv1 = ColumnVector.makeStruct(icv1); + ColumnVector structcv2 = ColumnVector.makeStruct(icv2); + ColumnVector intscalar = ColumnVector.fromInts(4); ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1); ColumnVector dec32cv_1 = ColumnVector.decimalFromInts(-dec32Scale_1, DECIMAL32_1); ColumnVector dec32cv_2 = ColumnVector.decimalFromInts(-dec32Scale_2, DECIMAL32_2)) { - try (ColumnVector answer = icv.notEqualTo(dcv); - ColumnVector expected = forEach(DType.BOOL8, icv, dcv, + try (ColumnVector answer = icv1.notEqualTo(dcv); + ColumnVector expected = forEach(DType.BOOL8, icv1, dcv, (b, l, r, i) -> b.append(l.getInt(i) != r.getDouble(i)))) { assertColumnsAreEqual(expected, answer, "int32 != double"); } @@ -897,18 +913,38 @@ public void testNotEqual() { } try (Scalar s = Scalar.fromFloat(1.0f); - ColumnVector answer = icv.notEqualTo(s); - ColumnVector expected = forEachS(DType.BOOL8, icv, 1.0f, + ColumnVector answer = icv1.notEqualTo(s); + ColumnVector expected = forEachS(DType.BOOL8, icv1, 1.0f, (b, l, r, i) -> b.append(l.getInt(i) != r))) { assertColumnsAreEqual(expected, answer, "int64 != scalar float"); } try (Scalar s = Scalar.fromShort((short) 100); - ColumnVector answer = s.notEqualTo(icv); - ColumnVector expected = forEachS(DType.BOOL8, (short) 100, icv, + ColumnVector answer = s.notEqualTo(icv1); + ColumnVector expected = forEachS(DType.BOOL8, (short) 100, icv1, (b, l, r, i) -> b.append(l != r.getInt(i)))) { assertColumnsAreEqual(expected, answer, "scalar short != int32"); } + + try (Scalar s = Scalar.structFromColumnViews(intscalar); + ColumnVector answersv = s.notEqualTo(structcv1); + ColumnVector expectedsv = forEachS(DType.BOOL8, 4, icv1, + (b, l, r, i) -> b.append(r.isNull(i) ? 
true : l != r.getInt(i)), true)) { + assertColumnsAreEqual(expectedsv, answersv, "scalar struct int32 != struct int32"); + } + + try (Scalar s = Scalar.structFromColumnViews(intscalar); + ColumnVector answervs = structcv1.notEqualTo(s); + ColumnVector expectedvs = forEachS(DType.BOOL8, icv1, 4, + (b, l, r, i) -> b.append(l.isNull(i) ? true : l.getInt(i) != r), true)) { + assertColumnsAreEqual(expectedvs, answervs, "struct int32 != scalar struct int32"); + } + + try (ColumnVector answervv = structcv1.notEqualTo(structcv2); + ColumnVector expectedvv = forEach(DType.BOOL8, icv1, icv2, + (b, l, r, i) -> b.append(l.isNull(i) ? !r.isNull(i) : r.isNull(i) || l.getInt(i) != r.getInt(i)), true)) { + assertColumnsAreEqual(expectedvv, answervv, "struct int32 != struct int32"); + } } } @@ -1039,12 +1075,16 @@ public void testStringLessThanScalarNotPresent() { @Test public void testGreaterThan() { - try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1); + try (ColumnVector icv1 = ColumnVector.fromBoxedInts(INTS_1); + ColumnVector icv2 = ColumnVector.fromBoxedInts(INTS_2); + ColumnVector structcv1 = ColumnVector.makeStruct(icv1); + ColumnVector structcv2 = ColumnVector.makeStruct(icv2); + ColumnVector intscalar = ColumnVector.fromInts(4); ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1); ColumnVector dec32cv1 = ColumnVector.fromDecimals(BIGDECIMAL32_1); ColumnVector dec32cv2 = ColumnVector.fromDecimals(BIGDECIMAL32_2)) { - try (ColumnVector answer = icv.greaterThan(dcv); - ColumnVector expected = forEach(DType.BOOL8, icv, dcv, + try (ColumnVector answer = icv1.greaterThan(dcv); + ColumnVector expected = forEach(DType.BOOL8, icv1, dcv, (b, l, r, i) -> b.append(l.getInt(i) > r.getDouble(i)))) { assertColumnsAreEqual(expected, answer, "int32 > double"); } @@ -1056,18 +1096,38 @@ public void testGreaterThan() { } try (Scalar s = Scalar.fromFloat(1.0f); - ColumnVector answer = icv.greaterThan(s); - ColumnVector expected = forEachS(DType.BOOL8, icv, 1.0f, + 
ColumnVector answer = icv1.greaterThan(s); + ColumnVector expected = forEachS(DType.BOOL8, icv1, 1.0f, (b, l, r, i) -> b.append(l.getInt(i) > r))) { assertColumnsAreEqual(expected, answer, "int64 > scalar float"); } try (Scalar s = Scalar.fromShort((short) 100); - ColumnVector answer = s.greaterThan(icv); - ColumnVector expected = forEachS(DType.BOOL8, (short) 100, icv, + ColumnVector answer = s.greaterThan(icv1); + ColumnVector expected = forEachS(DType.BOOL8, (short) 100, icv1, (b, l, r, i) -> b.append(l > r.getInt(i)))) { assertColumnsAreEqual(expected, answer, "scalar short > int32"); } + + try (Scalar s = Scalar.structFromColumnViews(intscalar); + ColumnVector answersv = s.greaterThan(structcv1); + ColumnVector expectedsv = forEachS(DType.BOOL8, 4, icv1, + (b, l, r, i) -> b.append(r.isNull(i) ? true : l > r.getInt(i)), true)) { + assertColumnsAreEqual(expectedsv, answersv, "scalar struct int32 > struct int32"); + } + + try (Scalar s = Scalar.structFromColumnViews(intscalar); + ColumnVector answervs = structcv1.greaterThan(s); + ColumnVector expectedvs = forEachS(DType.BOOL8, icv1, 4, + (b, l, r, i) -> b.append(l.isNull(i) ? false : l.getInt(i) > r), true)) { + assertColumnsAreEqual(expectedvs, answervs, "struct int32 > scalar struct int32"); + } + + try (ColumnVector answervv = structcv1.greaterThan(structcv2); + ColumnVector expectedvv = forEach(DType.BOOL8, icv1, icv2, + (b, l, r, i) -> b.append(l.isNull(i) ? 
false : r.isNull(i) || l.getInt(i) > r.getInt(i)), true)) { + assertColumnsAreEqual(expectedvv, answervv, "struct int32 > struct int32"); + } } } @@ -1121,25 +1181,29 @@ public void testStringGreaterThanScalarNotPresent() { @Test public void testLessOrEqualTo() { - try (ColumnVector icv = ColumnVector.fromBoxedInts(INTS_1); + try (ColumnVector icv1 = ColumnVector.fromBoxedInts(INTS_1); + ColumnVector icv2 = ColumnVector.fromBoxedInts(INTS_2); + ColumnVector structcv1 = ColumnVector.makeStruct(icv1); + ColumnVector structcv2 = ColumnVector.makeStruct(icv2); + ColumnVector intscalar = ColumnVector.fromInts(4); ColumnVector dcv = ColumnVector.fromBoxedDoubles(DOUBLES_1); ColumnVector dec32cv = ColumnVector.decimalFromInts(-dec32Scale_2, DECIMAL32_2)) { - try (ColumnVector answer = icv.lessOrEqualTo(dcv); - ColumnVector expected = forEach(DType.BOOL8, icv, dcv, + try (ColumnVector answer = icv1.lessOrEqualTo(dcv); + ColumnVector expected = forEach(DType.BOOL8, icv1, dcv, (b, l, r, i) -> b.append(l.getInt(i) <= r.getDouble(i)))) { assertColumnsAreEqual(expected, answer, "int32 <= double"); } try (Scalar s = Scalar.fromFloat(1.0f); - ColumnVector answer = icv.lessOrEqualTo(s); - ColumnVector expected = forEachS(DType.BOOL8, icv, 1.0f, + ColumnVector answer = icv1.lessOrEqualTo(s); + ColumnVector expected = forEachS(DType.BOOL8, icv1, 1.0f, (b, l, r, i) -> b.append(l.getInt(i) <= r))) { assertColumnsAreEqual(expected, answer, "int64 <= scalar float"); } try (Scalar s = Scalar.fromShort((short) 100); - ColumnVector answer = s.lessOrEqualTo(icv); - ColumnVector expected = forEachS(DType.BOOL8, (short) 100, icv, + ColumnVector answer = s.lessOrEqualTo(icv1); + ColumnVector expected = forEachS(DType.BOOL8, (short) 100, icv1, (b, l, r, i) -> b.append(l <= r.getInt(i)))) { assertColumnsAreEqual(expected, answer, "scalar short <= int32"); } @@ -1151,6 +1215,26 @@ public void testLessOrEqualTo() { assertColumnsAreEqual(expected, answer, "dec32 <= scalar dec32"); } } + + try 
(Scalar s = Scalar.structFromColumnViews(intscalar); + ColumnVector answersv = s.lessOrEqualTo(structcv1); + ColumnVector expectedsv = forEachS(DType.BOOL8, 4, icv1, + (b, l, r, i) -> b.append(r.isNull(i) ? false : l <= r.getInt(i)), true)) { + assertColumnsAreEqual(expectedsv, answersv, "scalar struct int32 <= struct int32"); + } + + try (Scalar s = Scalar.structFromColumnViews(intscalar); + ColumnVector answervs = structcv1.lessOrEqualTo(s); + ColumnVector expectedvs = forEachS(DType.BOOL8, icv1, 4, + (b, l, r, i) -> b.append(l.isNull(i) ? true : l.getInt(i) <= r), true)) { + assertColumnsAreEqual(expectedvs, answervs, "struct int32 <= scalar struct int32"); + } + + try (ColumnVector answervv = structcv1.lessOrEqualTo(structcv2); + ColumnVector expectedvv = forEach(DType.BOOL8, icv1, icv2, + (b, l, r, i) -> b.append(l.isNull(i) ? true : !r.isNull(i) && l.getInt(i) <= r.getInt(i)), true)) { + assertColumnsAreEqual(expectedvv, answervv, "struct int32 <= struct int32"); + } } }