MINIFICPP-2185 Upgrade to Visual Studio 2022 #1635

Closed
69 changes: 60 additions & 9 deletions .github/workflows/ci.yml
@@ -69,10 +69,10 @@ jobs:
with:
name: macos-binaries
path: build/bin
windows_VS2019:
name: "windows-2019"
runs-on: windows-2019
timeout-minutes: 180
windows_VS2022:
name: "windows-2022"
runs-on: windows-2022
timeout-minutes: 240
steps:
- name: Support longpaths
run: git config --system core.longpaths true
@@ -82,11 +82,11 @@
uses: actions/cache/restore@v3
with:
path: ~/AppData/Local/Mozilla/sccache/cache
key: ${{ runner.os }}-sccache-${{ github.ref }}-${{ github.sha }}
key: ${{ runner.os }}-2022-sccache-${{ github.ref }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-sccache-${{ github.ref }}
${{ runner.os }}-sccache-refs/heads/main
${{ runner.os }}-sccache
${{ runner.os }}-2022-sccache-${{ github.ref }}
${{ runner.os }}-2022-sccache-refs/heads/main
${{ runner.os }}-2022-sccache
- name: Run sccache-cache
uses: mozilla-actions/[email protected]
- name: Install ninja-build tool
@@ -116,13 +116,64 @@ jobs:
if: always()
with:
path: ~/AppData/Local/Mozilla/sccache/cache
key: ${{ runner.os }}-sccache-${{ github.ref }}-${{ github.sha }}
key: ${{ runner.os }}-2022-sccache-${{ github.ref }}-${{ github.sha }}
- name: test
run: cd ..\b && ctest --timeout 300 --parallel %NUMBER_OF_PROCESSORS% -C Release --output-on-failure
shell: cmd
- name: linter
run: cd ..\b && cmake --build . --target linter --config Release -j 8
shell: cmd
windows_VS2019:
name: "windows-2019"
runs-on: windows-2019
timeout-minutes: 240
steps:
- name: Support longpaths
run: git config --system core.longpaths true
- id: checkout
uses: actions/checkout@v3
- name: sccache cache restore
uses: actions/cache/restore@v3
with:
path: ~/AppData/Local/Mozilla/sccache/cache
key: ${{ runner.os }}-2019-sccache-${{ github.ref }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-2019-sccache-${{ github.ref }}
${{ runner.os }}-2019-sccache-refs/heads/main
${{ runner.os }}-2019-sccache
- name: Run sccache-cache
uses: mozilla-actions/[email protected]
- name: Install ninja-build tool
uses: seanmiddleditch/gha-setup-ninja@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Set up Lua
uses: xpol/[email protected]
- name: Set up NASM for OpenSSL
uses: ilammy/setup-nasm@v1
- id: install-sqliteodbc-driver
run: |
Invoke-WebRequest -Uri "http://www.ch-werner.de/sqliteodbc/sqliteodbc_w64.exe" -OutFile "sqliteodbc_w64.exe"
if ((Get-FileHash 'sqliteodbc_w64.exe').Hash -ne "0df79be4a4412542839ebf405b20d95a7dfc803da0b0b6b0dc653d30dc82ee84") {Write "Hash mismatch"; Exit 1}
./sqliteodbc_w64.exe /S
shell: powershell
- name: build
run: |
for /f "usebackq delims=" %%i in (`vswhere.exe -latest -property installationPath`) do if exist "%%i\Common7\Tools\vsdevcmd.bat" call "%%i\Common7\Tools\vsdevcmd.bat" -arch=x64 -host_arch=x64
win_build_vs.bat ..\b /64 /CI /S /A /PDH /SPLUNK /GCP /ELASTIC /K /L /R /Z /N /RO /PR /PYTHON_SCRIPTING /LUA_SCRIPTING /MQTT /SCCACHE /NINJA
sccache --show-stats
shell: cmd
- name: sccache cache save
uses: actions/cache/save@v3
if: always()
with:
path: ~/AppData/Local/Mozilla/sccache/cache
key: ${{ runner.os }}-2019-sccache-${{ github.ref }}-${{ github.sha }}
- name: test
run: cd ..\b && ctest --timeout 300 --parallel %NUMBER_OF_PROCESSORS% -C Release --output-on-failure
shell: cmd
ubuntu_20_04:
name: "ubuntu-20.04"
runs-on: ubuntu-20.04
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -106,7 +106,7 @@ endif()
# Enable usage of the VERSION specifier
if (WIN32)
add_compile_definitions(WIN32_LEAN_AND_MEAN _CRT_SECURE_NO_WARNINGS NOMINMAX)
add_compile_options(/W3 /utf-8 /bigobj)
add_compile_options(/W3 /utf-8 /bigobj /MP)
endif()

if (NOT PORTABLE)
8 changes: 7 additions & 1 deletion cmake/BundledOSSPUUID.cmake
@@ -39,7 +39,13 @@ function(use_bundled_osspuuid SOURCE_DIR BINARY_DIR)
ENDFOREACH(BYPRODUCT)

# Build project
set(CONFIGURE_COMMAND ./configure "CC=${CMAKE_C_COMPILER}" "CXX=${CMAKE_CXX_COMPILER}" "CFLAGS=${PASSTHROUGH_CMAKE_C_FLAGS} -fPIC" "CXXFLAGS=${PASSTHROUGH_CMAKE_CXX_FLAGS} -fPIC" --enable-shared=no --with-cxx --without-perl --without-php --without-pgsql "--prefix=${BINARY_DIR}/thirdparty/ossp-uuid-install")
if(WIN32)
set(ADDITIONAL_COMPILER_FLAGS "")
else()
set(ADDITIONAL_COMPILER_FLAGS "-fPIC")
endif()
set(CONFIGURE_COMMAND ./configure "CC=${CMAKE_C_COMPILER}" "CXX=${CMAKE_CXX_COMPILER}" "CFLAGS=${PASSTHROUGH_CMAKE_C_FLAGS} ${ADDITIONAL_COMPILER_FLAGS}" "CXXFLAGS=${PASSTHROUGH_CMAKE_CXX_FLAGS} ${ADDITIONAL_COMPILER_FLAGS}" --enable-shared=no --with-cxx --without-perl --without-php --without-pgsql "--prefix=${BINARY_DIR}/thirdparty/ossp-uuid-install")

string(TOLOWER "${CMAKE_BUILD_TYPE}" build_type)
if(NOT build_type MATCHES debug)
list(APPEND CONFIGURE_COMMAND --enable-debug=yes)
2 changes: 1 addition & 1 deletion cmake/BundledOpenSSL.cmake
@@ -75,7 +75,7 @@ function(use_openssl SOURCE_DIR BINARY_DIR)
URL_HASH "SHA256=b3aa61334233b852b63ddb048df181177c2c659eb9d4376008118f9c08d07674"
SOURCE_DIR "${BINARY_DIR}/thirdparty/openssl-src"
BUILD_IN_SOURCE true
CONFIGURE_COMMAND perl Configure "CFLAGS=${PASSTHROUGH_CMAKE_C_FLAGS} -fPIC" "CXXFLAGS=${PASSTHROUGH_CMAKE_CXX_FLAGS} -fPIC" ${OPENSSL_SHARED_FLAG} no-tests "--prefix=${OPENSSL_BIN_DIR}" "--openssldir=${OPENSSL_BIN_DIR}"
CONFIGURE_COMMAND perl Configure "CFLAGS=${PASSTHROUGH_CMAKE_C_FLAGS}" "CXXFLAGS=${PASSTHROUGH_CMAKE_CXX_FLAGS}" ${OPENSSL_SHARED_FLAG} no-tests "--prefix=${OPENSSL_BIN_DIR}" "--openssldir=${OPENSSL_BIN_DIR}"
BUILD_BYPRODUCTS ${OPENSSL_LIBRARIES_LIST}
PATCH_COMMAND ${PC}
EXCLUDE_FROM_ALL TRUE
2 changes: 1 addition & 1 deletion extensions/aws/processors/S3Processor.cpp
@@ -121,7 +121,7 @@ void S3Processor::onSchedule(const std::shared_ptr<core::ProcessContext>& contex

if (auto communications_timeout = context->getProperty<core::TimePeriodValue>(CommunicationsTimeout)) {
logger_->log_debug("S3Processor: Communications Timeout %" PRId64 " ms", communications_timeout->getMilliseconds().count());
client_config_->connectTimeoutMs = gsl::narrow<int64_t>(communications_timeout->getMilliseconds().count());
client_config_->connectTimeoutMs = gsl::narrow<long>(communications_timeout->getMilliseconds().count()); // NOLINT(runtime/int)
} else {
throw Exception(PROCESS_SCHEDULE_EXCEPTION, "Communications Timeout missing or invalid");
}
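Most of the C++ changes in this PR follow one pattern: a 64-bit value (a size_t, an int64_t, a std::chrono count) is handed to an API that takes a narrower parameter, and the implicit conversion that the older toolchain accepted quietly now draws warnings. gsl::narrow is the checked alternative to static_cast: it performs the cast, round-trips the result, and throws gsl::narrowing_error if the value did not survive. A minimal, self-contained sketch of the difference, assuming the gsl-lite header this codebase already uses:

#include <cstdint>
#include <iostream>
#include <gsl/gsl-lite.hpp>  // Microsoft GSL users would include <gsl/narrow> instead

int main() {
  const std::int64_t fits = 42;
  const std::int64_t too_big = std::int64_t{1} << 40;

  // Checked narrowing: behaves like static_cast when the value fits.
  const long ok = gsl::narrow<long>(fits);
  std::cout << ok << '\n';

  try {
    // On Windows, long is 32 bits wide, so this conversion is lossy and
    // throws gsl::narrowing_error instead of silently truncating. (On LP64
    // Linux, where long is 64 bits, it would succeed.)
    std::cout << gsl::narrow<long>(too_big) << '\n';
  } catch (const gsl::narrowing_error&) {
    std::cout << "narrowing_error: value does not fit in long\n";
  }
  return 0;
}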
1 change: 1 addition & 0 deletions extensions/aws/processors/S3Processor.h
@@ -25,6 +25,7 @@
#include <optional>
#include <set>
#include <string>
#include <string_view>
#include <utility>

#include "aws/core/auth/AWSCredentialsProvider.h"
4 changes: 2 additions & 2 deletions extensions/aws/s3/S3Wrapper.cpp
@@ -129,7 +129,7 @@ std::optional<S3Wrapper::UploadPartsResult> S3Wrapper::uploadParts(const PutObje
auto upload_part_request = Aws::S3::Model::UploadPartRequest{}
.WithBucket(put_object_params.bucket)
.WithKey(put_object_params.object_key)
.WithPartNumber(part_number)
.WithPartNumber(gsl::narrow<int>(part_number))
.WithUploadId(upload_state.upload_id);
upload_part_request.SetBody(stream_ptr);

@@ -164,7 +164,7 @@ std::optional<Aws::S3::Model::CompleteMultipartUploadResult> S3Wrapper::complete
for (size_t i = 0; i < upload_parts_result.part_etags.size(); ++i) {
auto part = Aws::S3::Model::CompletedPart{}
.WithETag(upload_parts_result.part_etags[i])
.WithPartNumber(i + 1);
.WithPartNumber(gsl::narrow<int>(i + 1));
completed_multipart_upload.AddParts(part);
}

4 changes: 2 additions & 2 deletions extensions/azure/storage/AzureDataLakeStorageClient.cpp
@@ -39,7 +39,7 @@ std::unique_ptr<Azure::Storage::Files::DataLake::DataLakeFileSystemClient> Azure
const AzureStorageCredentials& credentials, const std::string& file_system_name, std::optional<uint64_t> number_of_retries) {
Azure::Storage::Files::DataLake::DataLakeClientOptions options;
if (number_of_retries) {
options.Retry.MaxRetries = *number_of_retries;
options.Retry.MaxRetries = gsl::narrow<int32_t>(*number_of_retries);
}

if (credentials.getUseManagedIdentityCredentials()) {
@@ -89,7 +89,7 @@ std::unique_ptr<io::InputStream> AzureDataLakeStorageClient::fetchFile(const Fet
if (params.range_start || params.range_length) {
Azure::Core::Http::HttpRange range;
if (params.range_start) {
range.Offset = *params.range_start;
range.Offset = gsl::narrow<int64_t>(*params.range_start);
}

if (params.range_length) {
8 changes: 4 additions & 4 deletions extensions/elasticsearch/PostElasticsearch.cpp
@@ -142,15 +142,15 @@ class ElasticPayload {
[[nodiscard]] std::string headerString() const {
rapidjson::Document first_line = rapidjson::Document(rapidjson::kObjectType);

auto operation_index_key = rapidjson::Value(operation_.data(), operation_.size());
auto operation_index_key = rapidjson::Value(operation_.data(), gsl::narrow<rapidjson::SizeType>(operation_.size()));
first_line.AddMember(operation_index_key, rapidjson::Value{rapidjson::kObjectType}, first_line.GetAllocator());
auto& operation_request = first_line[operation_.c_str()];

auto index_json = rapidjson::Value(index_.data(), index_.size());
auto index_json = rapidjson::Value(index_.data(), gsl::narrow<rapidjson::SizeType>(index_.size()));
operation_request.AddMember("_index", index_json, first_line.GetAllocator());

if (id_) {
auto id_json = rapidjson::Value(id_->data(), id_->size());
auto id_json = rapidjson::Value(id_->data(), gsl::narrow<rapidjson::SizeType>(id_->size()));
operation_request.AddMember("_id", id_json, first_line.GetAllocator());
}

@@ -211,7 +211,7 @@ void processResponseFromElastic(const rapidjson::Document& response, core::Proce
auto& items = response["items"];
gsl_Expects(items.IsArray());
gsl_Expects(items.Size() == flowfiles_sent.size());
for (size_t i = 0; i < items.Size(); ++i) {
for (rapidjson::SizeType i = 0; i < items.Size(); ++i) {
gsl_Expects(items[i].IsObject());
for (auto it = items[i].MemberBegin(); it != items[i].MemberEnd(); ++it) {
addAttributesFromResponse("elasticsearch", it, *flowfiles_sent[i]);
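The gsl::narrow<rapidjson::SizeType> casts above, and the loop counter change, exist because RapidJSON sizes its strings and arrays with rapidjson::SizeType — a 32-bit unsigned int by default — while std::string::size() returns a 64-bit size_t on x64 Windows. A minimal sketch of the same pattern in isolation:

#include <cstdio>
#include <string>
#include <gsl/gsl-lite.hpp>
#include "rapidjson/document.h"

int main() {
  rapidjson::Document doc(rapidjson::kObjectType);
  const std::string name = "status";  // size() is a 64-bit size_t on x64

  // SetString takes (pointer, rapidjson::SizeType, allocator): the length
  // must be narrowed explicitly, and gsl::narrow throws if it would not fit.
  rapidjson::Value key;
  key.SetString(name.c_str(), gsl::narrow<rapidjson::SizeType>(name.size()), doc.GetAllocator());

  rapidjson::Value value;
  value.SetInt(200);
  doc.AddMember(key, value, doc.GetAllocator());

  std::printf("status = %d\n", doc["status"].GetInt());
  return 0;
}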
4 changes: 2 additions & 2 deletions extensions/expression-language/common/Value.h
@@ -142,9 +142,9 @@ class Value {
if (value.empty()) return default_value;
try {
return std::invoke(conversion_function, value);
} catch (const std::invalid_argument& ex) {
} catch (const std::invalid_argument&) {
throw std::invalid_argument{utils::StringUtils::join_pack(context, " failed to parse \"", value, "\": invalid argument")};
} catch (const std::out_of_range& ex) {
} catch (const std::out_of_range&) {
throw std::out_of_range{utils::StringUtils::join_pack(context, " failed to parse \"", value, "\": out of range")};
}
}
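The Value.h hunk only drops the names of the caught exceptions: neither handler reads the exception object, so binding it to ex left an unreferenced local variable for the compiler to warn about. Catching by type alone expresses the same dispatch. A standalone sketch of the pattern (the helper name is invented for illustration):

#include <stdexcept>
#include <string>

// Same pattern as the Value.h change: the handlers dispatch on the exception
// type only, so the catch clauses bind no variable and nothing goes unused.
int parse_or(const std::string& text, int fallback) {
  try {
    return std::stoi(text);
  } catch (const std::invalid_argument&) {
    return fallback;
  } catch (const std::out_of_range&) {
    return fallback;
  }
}

int main() {
  return parse_or("not a number", -1) == -1 ? 0 : 1;
}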
6 changes: 3 additions & 3 deletions extensions/libarchive/WriteArchiveStream.cpp
@@ -30,7 +30,7 @@ WriteArchiveStreamImpl::archive_ptr WriteArchiveStreamImpl::createWriteArchive()
return nullptr;
}

int result;
int result = 0;

result = archive_write_set_format_ustar(arch.get());
if (result != ARCHIVE_OK) {
@@ -94,7 +94,7 @@ bool WriteArchiveStreamImpl::newEntry(const EntryInfo& info) {
return false;
}
archive_entry_set_pathname(arch_entry_.get(), info.filename.c_str());
archive_entry_set_size(arch_entry_.get(), info.size);
archive_entry_set_size(arch_entry_.get(), gsl::narrow<la_int64_t>(info.size));
archive_entry_set_mode(arch_entry_.get(), S_IFREG | 0755);

int result = archive_write_header(arch_.get(), arch_entry_.get());
@@ -115,7 +115,7 @@ size_t WriteArchiveStreamImpl::write(const uint8_t* data, size_t len) {
}
gsl_Expects(data);

int result = archive_write_data(arch_.get(), data, len);
int result = gsl::narrow<int>(archive_write_data(arch_.get(), data, len));
if (result < 0) {
logger_->log_error("Archive write data error %s", archive_error_string(arch_.get()));
arch_entry_.reset();
12 changes: 6 additions & 6 deletions extensions/librdkafka/ConsumeKafka.cpp
@@ -113,7 +113,7 @@ void rebalance_cb(rd_kafka_t* rk, rd_kafka_resp_err_t trigger, rd_kafka_topic_pa
} // namespace

void ConsumeKafka::create_topic_partition_list() {
kf_topic_partition_list_ = { rd_kafka_topic_partition_list_new(topic_names_.size()), utils::rd_kafka_topic_partition_list_deleter() };
kf_topic_partition_list_ = { rd_kafka_topic_partition_list_new(gsl::narrow<int>(topic_names_.size())), utils::rd_kafka_topic_partition_list_deleter() };

// On subscriptions any topics prefixed with ^ will be regex matched
if (utils::StringUtils::equalsIgnoreCase(TOPIC_FORMAT_PATTERNS, topic_name_format_)) {
@@ -229,8 +229,8 @@ std::vector<std::unique_ptr<rd_kafka_message_t, utils::rd_kafka_message_deleter>
auto elapsed = std::chrono::steady_clock::now() - start;
while (messages.size() < max_poll_records_ && elapsed < max_poll_time_milliseconds_) {
logger_->log_debug("Polling for new messages for %d milliseconds...", max_poll_time_milliseconds_.count());
std::unique_ptr<rd_kafka_message_t, utils::rd_kafka_message_deleter>
message { rd_kafka_consumer_poll(consumer_.get(), std::chrono::duration_cast<std::chrono::milliseconds>(max_poll_time_milliseconds_ - elapsed).count()), utils::rd_kafka_message_deleter() };
const auto timeout_ms = gsl::narrow<int>(std::chrono::duration_cast<std::chrono::milliseconds>(max_poll_time_milliseconds_ - elapsed).count());
std::unique_ptr<rd_kafka_message_t, utils::rd_kafka_message_deleter> message{rd_kafka_consumer_poll(consumer_.get(), timeout_ms)};
if (!message) {
break;
}
@@ -281,7 +281,7 @@ std::string ConsumeKafka::resolve_duplicate_headers(const std::vector<std::strin
std::vector<std::string> ConsumeKafka::get_matching_headers(const rd_kafka_message_t& message, const std::string& header_name) const {
// Headers fetched this way are freed when rd_kafka_message_destroy is called
// Detaching them using rd_kafka_message_detach_headers does not seem to work
rd_kafka_headers_t* headers_raw;
rd_kafka_headers_t* headers_raw = nullptr;
const rd_kafka_resp_err_t get_header_response = rd_kafka_message_headers(&message, &headers_raw);
if (RD_KAFKA_RESP_ERR__NOENT == get_header_response) {
return {};
@@ -291,8 +291,8 @@ std::vector<std::string> ConsumeKafka::get_matching_headers(const rd_kafka_messa
}
std::vector<std::string> matching_headers;
for (std::size_t header_idx = 0;; ++header_idx) {
const char* value; // Not to be freed
std::size_t size;
const char* value = nullptr; // Not to be freed
std::size_t size = 0;
if (RD_KAFKA_RESP_ERR_NO_ERROR != rd_kafka_header_get(headers_raw, header_idx, header_name.c_str(), (const void**)(&value), &size)) {
break;
}
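The = nullptr and = 0 initializers added in ConsumeKafka address the compiler's "potentially uninitialized local variable" analysis: rd_kafka_message_headers and rd_kafka_header_get fill their out-parameters only on success, and the compiler cannot prove the failure paths never read them. Initializing at the declaration is the usual fix. A sketch with a stand-in C-style API (get_header and its contract are invented for illustration):

#include <cstddef>
#include <cstdio>

// Stand-in for a C API such as rd_kafka_header_get: fills the out-parameters
// and returns 0 on success; leaves them untouched and returns -1 on failure.
int get_header(bool succeed, const char** value, std::size_t* size) {
  if (!succeed) return -1;
  static const char header[] = "content-type: text/plain";
  *value = header;
  *size = sizeof(header) - 1;
  return 0;
}

int main() {
  // Initialized at the declaration: even when the call fails and never
  // writes through the pointers, both variables hold well-defined values,
  // so the uninitialized-use warning cannot fire and any accidental read
  // is at least deterministic.
  const char* value = nullptr;
  std::size_t size = 0;
  if (get_header(false, &value, &size) == 0) {
    std::printf("%.*s\n", static_cast<int>(size), value);
  } else {
    std::printf("no header (size=%zu)\n", size);
  }
  return 0;
}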
2 changes: 1 addition & 1 deletion extensions/librdkafka/PublishKafka.cpp
@@ -306,7 +306,7 @@ class ReadCallback {
error_ = rd_kafka_err2str(err);
return read_size_;
}
read_size_ += readRet;
read_size_ += gsl::narrow<uint32_t>(readRet);
}
return read_size_;
}
2 changes: 1 addition & 1 deletion extensions/mqtt/processors/PublishMQTT.cpp
@@ -216,7 +216,7 @@ void PublishMQTT::setMqtt5Properties(MQTTAsync_message& message, const std::stri
if (message_expiry_interval_.has_value()) {
MQTTProperty property;
property.identifier = MQTTPROPERTY_CODE_MESSAGE_EXPIRY_INTERVAL;
property.value.integer4 = message_expiry_interval_->count(); // NOLINT(cppcoreguidelines-pro-type-union-access)
property.value.integer4 = gsl::narrow<int>(message_expiry_interval_->count()); // NOLINT(cppcoreguidelines-pro-type-union-access)
MQTTProperties_add(&message.properties, &property);
}

4 changes: 2 additions & 2 deletions extensions/pdh/PDHCounters.cpp
@@ -55,7 +55,7 @@ std::string PDHCounter::getCounterName() const {
}

void SinglePDHCounter::addToJson(rapidjson::Value& body, rapidjson::Document::AllocatorType& alloc) const {
rapidjson::Value key(getCounterName().c_str(), getCounterName().length(), alloc);
rapidjson::Value key(getCounterName().c_str(), gsl::narrow<rapidjson::SizeType>(getCounterName().length()), alloc);
rapidjson::Value& group_node = acquireNode(getObjectName(), body, alloc);
group_node.AddMember(key, getValue(), alloc);
}
@@ -91,7 +91,7 @@ void PDHCounterArray::addToJson(rapidjson::Value& body, rapidjson::Document::All
rapidjson::Value& counter_node = acquireNode(node_name, group_node, alloc);
rapidjson::Value value = getValue(i);
rapidjson::Value key;
key.SetString(getCounterName().c_str(), getCounterName().length(), alloc);
key.SetString(getCounterName().c_str(), gsl::narrow<rapidjson::SizeType>(getCounterName().length()), alloc);
counter_node.AddMember(key, value, alloc);
}
}
3 changes: 2 additions & 1 deletion extensions/python/tests/PythonManifestTests.cpp
@@ -25,6 +25,7 @@
#include "flow-tests/TestControllerWithFlow.h"
#include "EmptyFlow.h"
#include "c2/C2MetricsPublisher.h"
#include "utils/gsl.h"

using minifi::state::response::SerializedResponseNode;

@@ -40,7 +41,7 @@ const SerializedResponseNode& getNode(const std::vector<SerializedResponseNode>&
for (auto& node : nodes) {
if (node.name == name) return node;
}
assert(false);
gsl_FailFast();
}

TEST_CASE("Python processor's description is part of the manifest") {
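The assert(false) → gsl_FailFast() swap above fixes a genuine Release-build problem: assert expands to nothing under NDEBUG, so getNode() had a path that falls off the end of a value-returning function — undefined behavior, and a "not all control paths return a value" warning on MSVC. gsl_FailFast() is [[noreturn]] and terminates in every build configuration, which both satisfies the compiler and keeps the invariant enforced in Release. A minimal sketch, assuming gsl-lite:

#include <string>
#include <vector>
#include <gsl/gsl-lite.hpp>

struct Node { std::string name; int value = 0; };

// With assert(false), a Release build (NDEBUG) would compile the lookup miss
// into "run off the end of a non-void function". gsl_FailFast() is
// [[noreturn]], so every path either returns a Node& or terminates.
const Node& get_node(const std::vector<Node>& nodes, const std::string& name) {
  for (const auto& node : nodes) {
    if (node.name == name) return node;
  }
  gsl_FailFast();  // unreachable by contract; aborts if the contract is broken
}

int main() {
  const std::vector<Node> nodes{{"alpha", 1}, {"beta", 2}};
  return get_node(nodes, "beta").value == 2 ? 0 : 1;
}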
3 changes: 2 additions & 1 deletion extensions/smb/tests/SmbConnectionControllerServiceTests.cpp
@@ -20,6 +20,7 @@
#include "Catch.h"
#include "SmbConnectionControllerService.h"
#include "utils/TempSmbShare.h"
#include "utils/UnicodeConversion.h"

namespace org::apache::nifi::minifi::extensions::smb::test {

@@ -49,7 +50,7 @@ TEST_CASE_METHOD(SmbConnectionControllerServiceFixture, "SmbConnectionController

SECTION("Valid share") {
plan_->setProperty(smb_connection_node_, SmbConnectionControllerService::Hostname, "localhost");
plan_->setProperty(smb_connection_node_, SmbConnectionControllerService::Share, minifi::utils::OsUtils::wideStringToString(share_local_name));
plan_->setProperty(smb_connection_node_, SmbConnectionControllerService::Share, minifi::utils::to_string(share_local_name));

REQUIRE_NOTHROW(plan_->finalize());

14 changes: 3 additions & 11 deletions extensions/sql/data/JSONSQLWriter.cpp
@@ -22,11 +22,7 @@
#include "rapidjson/prettywriter.h"
#include "Exception.h"

namespace org {
namespace apache {
namespace nifi {
namespace minifi {
namespace sql {
namespace org::apache::nifi::minifi::sql {

JSONSQLWriter::JSONSQLWriter(bool pretty, ColumnFilter column_filter)
: pretty_(pretty), current_batch_(rapidjson::kArrayType), column_filter_(std::move(column_filter)) {
@@ -83,7 +79,7 @@ void JSONSQLWriter::addToJSONRow(const std::string& column_name, rapidjson::Valu

rapidjson::Value JSONSQLWriter::toJSONString(const std::string& s) {
rapidjson::Value jsonValue;
jsonValue.SetString(s.c_str(), s.size(), current_batch_.GetAllocator());
jsonValue.SetString(s.c_str(), gsl::narrow<rapidjson::SizeType>(s.size()), current_batch_.GetAllocator());

return jsonValue;
}
@@ -102,8 +98,4 @@ std::string JSONSQLWriter::toString() {
return {buffer.GetString(), buffer.GetSize()};
}

} // namespace sql
} // namespace minifi
} // namespace nifi
} // namespace apache
} // namespace org
} // namespace org::apache::nifi::minifi::sql
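This last cleanup is unrelated to Windows: it collapses five nested namespace blocks — and their five closing braces and comments — into the C++17 nested namespace definition, which every toolchain in this PR's CI matrix supports. The two spellings are equivalent:

// C++17 nested namespace definition; identical in meaning to
// namespace org { namespace apache { /* ... */ } } nested five levels deep.
namespace org::apache::nifi::minifi::sql {

struct Marker {};

}  // namespace org::apache::nifi::minifi::sql

int main() {
  org::apache::nifi::minifi::sql::Marker marker;
  (void)marker;  // silence unused-variable warnings
  return 0;
}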