MINIFICPP-2185 Upgrade to Visual Studio 2022
fgerlits committed Sep 25, 2023
1 parent fe7abb2 commit 2c28fa4
Showing 49 changed files with 277 additions and 228 deletions.
69 changes: 60 additions & 9 deletions .github/workflows/ci.yml
@@ -69,10 +69,10 @@ jobs:
with:
name: macos-binaries
path: build/bin
-windows_VS2019:
-name: "windows-2019"
-runs-on: windows-2019
-timeout-minutes: 180
+windows_VS2022:
+name: "windows-2022"
+runs-on: windows-2022
+timeout-minutes: 240
steps:
- name: Support longpaths
run: git config --system core.longpaths true
@@ -82,11 +82,11 @@
uses: actions/cache/restore@v3
with:
path: ~/AppData/Local/Mozilla/sccache/cache
-key: ${{ runner.os }}-sccache-${{ github.ref }}-${{ github.sha }}
+key: ${{ runner.os }}-2022-sccache-${{ github.ref }}-${{ github.sha }}
restore-keys: |
-${{ runner.os }}-sccache-${{ github.ref }}
-${{ runner.os }}-sccache-refs/heads/main
-${{ runner.os }}-sccache
+${{ runner.os }}-2022-sccache-${{ github.ref }}
+${{ runner.os }}-2022-sccache-refs/heads/main
+${{ runner.os }}-2022-sccache
- name: Run sccache-cache
uses: mozilla-actions/[email protected]
- name: Install ninja-build tool
@@ -116,13 +116,64 @@ jobs:
if: always()
with:
path: ~/AppData/Local/Mozilla/sccache/cache
-key: ${{ runner.os }}-sccache-${{ github.ref }}-${{ github.sha }}
+key: ${{ runner.os }}-2022-sccache-${{ github.ref }}-${{ github.sha }}
- name: test
run: cd ..\b && ctest --timeout 300 --parallel %NUMBER_OF_PROCESSORS% -C Release --output-on-failure
shell: cmd
- name: linter
run: cd ..\b && cmake --build . --target linter --config Release -j 8
shell: cmd
+windows_VS2019:
+name: "windows-2019"
+runs-on: windows-2019
+timeout-minutes: 240
+steps:
+- name: Support longpaths
+run: git config --system core.longpaths true
+- id: checkout
+uses: actions/checkout@v3
+- name: sccache cache restore
+uses: actions/cache/restore@v3
+with:
+path: ~/AppData/Local/Mozilla/sccache/cache
+key: ${{ runner.os }}-2019-sccache-${{ github.ref }}-${{ github.sha }}
+restore-keys: |
+${{ runner.os }}-2019-sccache-${{ github.ref }}
+${{ runner.os }}-2019-sccache-refs/heads/main
+${{ runner.os }}-2019-sccache
+- name: Run sccache-cache
+uses: mozilla-actions/[email protected]
+- name: Install ninja-build tool
+uses: seanmiddleditch/gha-setup-ninja@v3
+- name: Set up Python
+uses: actions/setup-python@v4
+with:
+python-version: '3.11'
+- name: Set up Lua
+uses: xpol/[email protected]
+- name: Set up NASM for OpenSSL
+uses: ilammy/setup-nasm@v1
+- id: install-sqliteodbc-driver
+run: |
+Invoke-WebRequest -Uri "http://www.ch-werner.de/sqliteodbc/sqliteodbc_w64.exe" -OutFile "sqliteodbc_w64.exe"
+if ((Get-FileHash 'sqliteodbc_w64.exe').Hash -ne "0df79be4a4412542839ebf405b20d95a7dfc803da0b0b6b0dc653d30dc82ee84") {Write "Hash mismatch"; Exit 1}
+./sqliteodbc_w64.exe /S
+shell: powershell
+- name: build
+run: |
+for /f "usebackq delims=" %%i in (`vswhere.exe -latest -property installationPath`) do if exist "%%i\Common7\Tools\vsdevcmd.bat" call "%%i\Common7\Tools\vsdevcmd.bat" -arch=x64 -host_arch=x64
+win_build_vs.bat ..\b /64 /CI /S /A /PDH /SPLUNK /GCP /ELASTIC /K /L /R /Z /N /RO /PR /PYTHON_SCRIPTING /LUA_SCRIPTING /MQTT /SCCACHE /NINJA
+sccache --show-stats
+shell: cmd
+- name: sccache cache save
+uses: actions/cache/save@v3
+if: always()
+with:
+path: ~/AppData/Local/Mozilla/sccache/cache
+key: ${{ runner.os }}-2019-sccache-${{ github.ref }}-${{ github.sha }}
+- name: test
+run: cd ..\b && ctest --timeout 300 --parallel %NUMBER_OF_PROCESSORS% -C Release --output-on-failure
+shell: cmd
ubuntu_20_04:
name: "ubuntu-20.04"
runs-on: ubuntu-20.04
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -106,7 +106,7 @@ endif()
# Enable usage of the VERSION specifier
if (WIN32)
add_compile_definitions(WIN32_LEAN_AND_MEAN _CRT_SECURE_NO_WARNINGS NOMINMAX)
-add_compile_options(/W3 /utf-8 /bigobj)
+add_compile_options(/W3 /utf-8 /bigobj /MP)
endif()

if (NOT PORTABLE)
8 changes: 7 additions & 1 deletion cmake/BundledOSSPUUID.cmake
@@ -39,7 +39,13 @@ function(use_bundled_osspuuid SOURCE_DIR BINARY_DIR)
ENDFOREACH(BYPRODUCT)

# Build project
-set(CONFIGURE_COMMAND ./configure "CC=${CMAKE_C_COMPILER}" "CXX=${CMAKE_CXX_COMPILER}" "CFLAGS=${PASSTHROUGH_CMAKE_C_FLAGS} -fPIC" "CXXFLAGS=${PASSTHROUGH_CMAKE_CXX_FLAGS} -fPIC" --enable-shared=no --with-cxx --without-perl --without-php --without-pgsql "--prefix=${BINARY_DIR}/thirdparty/ossp-uuid-install")
+if(WIN32)
+set(ADDITIONAL_COMPILER_FLAGS "")
+else()
+set(ADDITIONAL_COMPILER_FLAGS "-fPIC")
+endif()
+set(CONFIGURE_COMMAND ./configure "CC=${CMAKE_C_COMPILER}" "CXX=${CMAKE_CXX_COMPILER}" "CFLAGS=${PASSTHROUGH_CMAKE_C_FLAGS} ${ADDITIONAL_COMPILER_FLAGS}" "CXXFLAGS=${PASSTHROUGH_CMAKE_CXX_FLAGS} ${ADDITIONAL_COMPILER_FLAGS}" --enable-shared=no --with-cxx --without-perl --without-php --without-pgsql "--prefix=${BINARY_DIR}/thirdparty/ossp-uuid-install")

string(TOLOWER "${CMAKE_BUILD_TYPE}" build_type)
if(NOT build_type MATCHES debug)
list(APPEND CONFIGURE_COMMAND --enable-debug=yes)
2 changes: 1 addition & 1 deletion cmake/BundledOpenSSL.cmake
@@ -75,7 +75,7 @@ function(use_openssl SOURCE_DIR BINARY_DIR)
URL_HASH "SHA256=b3aa61334233b852b63ddb048df181177c2c659eb9d4376008118f9c08d07674"
SOURCE_DIR "${BINARY_DIR}/thirdparty/openssl-src"
BUILD_IN_SOURCE true
-CONFIGURE_COMMAND perl Configure "CFLAGS=${PASSTHROUGH_CMAKE_C_FLAGS} -fPIC" "CXXFLAGS=${PASSTHROUGH_CMAKE_CXX_FLAGS} -fPIC" ${OPENSSL_SHARED_FLAG} no-tests "--prefix=${OPENSSL_BIN_DIR}" "--openssldir=${OPENSSL_BIN_DIR}"
+CONFIGURE_COMMAND perl Configure "CFLAGS=${PASSTHROUGH_CMAKE_C_FLAGS}" "CXXFLAGS=${PASSTHROUGH_CMAKE_CXX_FLAGS}" ${OPENSSL_SHARED_FLAG} no-tests "--prefix=${OPENSSL_BIN_DIR}" "--openssldir=${OPENSSL_BIN_DIR}"
BUILD_BYPRODUCTS ${OPENSSL_LIBRARIES_LIST}
PATCH_COMMAND ${PC}
EXCLUDE_FROM_ALL TRUE
2 changes: 1 addition & 1 deletion extensions/aws/processors/S3Processor.cpp
@@ -121,7 +121,7 @@ void S3Processor::onSchedule(const std::shared_ptr<core::ProcessContext>& contex

if (auto communications_timeout = context->getProperty<core::TimePeriodValue>(CommunicationsTimeout)) {
logger_->log_debug("S3Processor: Communications Timeout %" PRId64 " ms", communications_timeout->getMilliseconds().count());
-client_config_->connectTimeoutMs = gsl::narrow<int64_t>(communications_timeout->getMilliseconds().count());
+client_config_->connectTimeoutMs = gsl::narrow<long>(communications_timeout->getMilliseconds().count()); // NOLINT(runtime/int)
} else {
throw Exception(PROCESS_SCHEDULE_EXCEPTION, "Communications Timeout missing or invalid");
}
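Most of the C++ changes in this commit follow the pattern of the hunk above: the VS2022 toolchain warns about implicit narrowing conversions (a 64-bit size_t or std::chrono count flowing into an int or long field), and each affected site is wrapped in gsl::narrow<T>(), which validates the conversion at run time instead of truncating silently. A minimal sketch of the idea behind gsl::narrow (illustrative only; the real implementation lives in the GSL library, throws gsl::narrowing_error, and additionally guards against signed/unsigned round-trip artifacts):

#include <stdexcept>

// Checked narrowing in the spirit of gsl::narrow: cast, then verify that
// casting back reproduces the original value, i.e. nothing was lost.
template <typename To, typename From>
To checked_narrow(From value) {
  const To result = static_cast<To>(value);
  if (static_cast<From>(result) != value) {
    throw std::runtime_error{"narrowing changed the value"};
  }
  return result;
}

// For example, a 64-bit millisecond count assigned to the AWS SDK's 'long'
// field (32 bits on Windows) either converts exactly or fails loudly:
//   client_config->connectTimeoutMs = checked_narrow<long>(timeout_ms);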
1 change: 1 addition & 0 deletions extensions/aws/processors/S3Processor.h
@@ -25,6 +25,7 @@
#include <optional>
#include <set>
#include <string>
+#include <string_view>
#include <utility>

#include "aws/core/auth/AWSCredentialsProvider.h"
4 changes: 2 additions & 2 deletions extensions/aws/s3/S3Wrapper.cpp
@@ -129,7 +129,7 @@ std::optional<S3Wrapper::UploadPartsResult> S3Wrapper::uploadParts(const PutObje
auto upload_part_request = Aws::S3::Model::UploadPartRequest{}
.WithBucket(put_object_params.bucket)
.WithKey(put_object_params.object_key)
-.WithPartNumber(part_number)
+.WithPartNumber(gsl::narrow<int>(part_number))
.WithUploadId(upload_state.upload_id);
upload_part_request.SetBody(stream_ptr);

@@ -164,7 +164,7 @@ std::optional<Aws::S3::Model::CompleteMultipartUploadResult> S3Wrapper::complete
for (size_t i = 0; i < upload_parts_result.part_etags.size(); ++i) {
auto part = Aws::S3::Model::CompletedPart{}
.WithETag(upload_parts_result.part_etags[i])
-.WithPartNumber(i + 1);
+.WithPartNumber(gsl::narrow<int>(i + 1));
completed_multipart_upload.AddParts(part);
}

2 changes: 1 addition & 1 deletion extensions/azure/storage/AzureDataLakeStorageClient.cpp
@@ -39,7 +39,7 @@ std::unique_ptr<Azure::Storage::Files::DataLake::DataLakeFileSystemClient> Azure
const AzureStorageCredentials& credentials, const std::string& file_system_name, std::optional<uint64_t> number_of_retries) {
Azure::Storage::Files::DataLake::DataLakeClientOptions options;
if (number_of_retries) {
-options.Retry.MaxRetries = *number_of_retries;
+options.Retry.MaxRetries = gsl::narrow<int32_t>(*number_of_retries);
}

if (credentials.getUseManagedIdentityCredentials()) {
8 changes: 4 additions & 4 deletions extensions/elasticsearch/PostElasticsearch.cpp
@@ -142,15 +142,15 @@ class ElasticPayload {
[[nodiscard]] std::string headerString() const {
rapidjson::Document first_line = rapidjson::Document(rapidjson::kObjectType);

-auto operation_index_key = rapidjson::Value(operation_.data(), operation_.size());
+auto operation_index_key = rapidjson::Value(operation_.data(), gsl::narrow<rapidjson::SizeType>(operation_.size()));
first_line.AddMember(operation_index_key, rapidjson::Value{rapidjson::kObjectType}, first_line.GetAllocator());
auto& operation_request = first_line[operation_.c_str()];

-auto index_json = rapidjson::Value(index_.data(), index_.size());
+auto index_json = rapidjson::Value(index_.data(), gsl::narrow<rapidjson::SizeType>(index_.size()));
operation_request.AddMember("_index", index_json, first_line.GetAllocator());

if (id_) {
-auto id_json = rapidjson::Value(id_->data(), id_->size());
+auto id_json = rapidjson::Value(id_->data(), gsl::narrow<rapidjson::SizeType>(id_->size()));
operation_request.AddMember("_id", id_json, first_line.GetAllocator());
}

@@ -211,7 +211,7 @@ void processResponseFromElastic(const rapidjson::Document& response, core::Proce
auto& items = response["items"];
gsl_Expects(items.IsArray());
gsl_Expects(items.Size() == flowfiles_sent.size());
-for (size_t i = 0; i < items.Size(); ++i) {
+for (rapidjson::SizeType i = 0; i < items.Size(); ++i) {
gsl_Expects(items[i].IsObject());
for (auto it = items[i].MemberBegin(); it != items[i].MemberEnd(); ++it) {
addAttributesFromResponse("elasticsearch", it, *flowfiles_sent[i]);
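The gsl::narrow<rapidjson::SizeType> casts above address a related mismatch: rapidjson::SizeType is a 32-bit unsigned int even in 64-bit builds, while std::string::size() returns a 64-bit size_t on x64 Windows, so VS2022 flags the implicit truncation. A sketch of the recurring pattern (assuming this codebase's "utils/gsl.h" wrapper for the GSL headers):

#include <string>

#include "rapidjson/document.h"
#include "utils/gsl.h"  // pulls in gsl::narrow in this codebase

// rapidjson::SizeType is 32-bit even on 64-bit targets, so the size_t length
// must be narrowed explicitly; gsl::narrow throws instead of truncating.
rapidjson::Value toJsonString(const std::string& s, rapidjson::Document::AllocatorType& alloc) {
  return rapidjson::Value(s.data(), gsl::narrow<rapidjson::SizeType>(s.size()), alloc);
}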
4 changes: 2 additions & 2 deletions extensions/expression-language/common/Value.h
@@ -142,9 +142,9 @@ class Value {
if (value.empty()) return default_value;
try {
return std::invoke(conversion_function, value);
-} catch (const std::invalid_argument& ex) {
+} catch (const std::invalid_argument&) {
throw std::invalid_argument{utils::StringUtils::join_pack(context, " failed to parse \"", value, "\": invalid argument")};
-} catch (const std::out_of_range& ex) {
+} catch (const std::out_of_range&) {
throw std::out_of_range{utils::StringUtils::join_pack(context, " failed to parse \"", value, "\": out of range")};
}
}
2 changes: 1 addition & 1 deletion extensions/libarchive/WriteArchiveStream.cpp
@@ -115,7 +115,7 @@ size_t WriteArchiveStreamImpl::write(const uint8_t* data, size_t len) {
}
gsl_Expects(data);

-int result = archive_write_data(arch_.get(), data, len);
+int result = gsl::narrow<int>(archive_write_data(arch_.get(), data, len));
if (result < 0) {
logger_->log_error("Archive write data error %s", archive_error_string(arch_.get()));
arch_entry_.reset();
6 changes: 3 additions & 3 deletions extensions/librdkafka/ConsumeKafka.cpp
@@ -113,7 +113,7 @@ void rebalance_cb(rd_kafka_t* rk, rd_kafka_resp_err_t trigger, rd_kafka_topic_pa
} // namespace

void ConsumeKafka::create_topic_partition_list() {
-kf_topic_partition_list_ = { rd_kafka_topic_partition_list_new(topic_names_.size()), utils::rd_kafka_topic_partition_list_deleter() };
+kf_topic_partition_list_ = { rd_kafka_topic_partition_list_new(gsl::narrow<int>(topic_names_.size())), utils::rd_kafka_topic_partition_list_deleter() };

// On subscriptions any topics prefixed with ^ will be regex matched
if (utils::StringUtils::equalsIgnoreCase(TOPIC_FORMAT_PATTERNS, topic_name_format_)) {
@@ -229,8 +229,8 @@ std::vector<std::unique_ptr<rd_kafka_message_t, utils::rd_kafka_message_deleter>
auto elapsed = std::chrono::steady_clock::now() - start;
while (messages.size() < max_poll_records_ && elapsed < max_poll_time_milliseconds_) {
logger_->log_debug("Polling for new messages for %d milliseconds...", max_poll_time_milliseconds_.count());
-std::unique_ptr<rd_kafka_message_t, utils::rd_kafka_message_deleter>
-message { rd_kafka_consumer_poll(consumer_.get(), std::chrono::duration_cast<std::chrono::milliseconds>(max_poll_time_milliseconds_ - elapsed).count()), utils::rd_kafka_message_deleter() };
+const auto timeout_ms = gsl::narrow<int>(std::chrono::duration_cast<std::chrono::milliseconds>(max_poll_time_milliseconds_ - elapsed).count());
+std::unique_ptr<rd_kafka_message_t, utils::rd_kafka_message_deleter> message{rd_kafka_consumer_poll(consumer_.get(), timeout_ms)};
if (!message) {
break;
}
2 changes: 1 addition & 1 deletion extensions/librdkafka/PublishKafka.cpp
@@ -303,7 +303,7 @@ class ReadCallback {
error_ = rd_kafka_err2str(err);
return read_size_;
}
-read_size_ += readRet;
+read_size_ += gsl::narrow<uint32_t>(readRet);
}
return read_size_;
}
10 changes: 5 additions & 5 deletions extensions/mqtt/processors/PublishMQTT.cpp
@@ -102,7 +102,7 @@ bool PublishMQTT::sendMessage(const std::vector<std::byte>& buffer, const std::s

MQTTAsync_message message_to_publish = MQTTAsync_message_initializer;
message_to_publish.payload = const_cast<std::byte*>(buffer.data());
-message_to_publish.payloadlen = buffer.size();
+message_to_publish.payloadlen = gsl::narrow<int>(buffer.size());
message_to_publish.qos = static_cast<int>(qos_);
message_to_publish.retained = retain_;

@@ -216,14 +216,14 @@ void PublishMQTT::setMqtt5Properties(MQTTAsync_message& message, const std::stri
if (message_expiry_interval_.has_value()) {
MQTTProperty property;
property.identifier = MQTTPROPERTY_CODE_MESSAGE_EXPIRY_INTERVAL;
-property.value.integer4 = message_expiry_interval_->count();
+property.value.integer4 = gsl::narrow<int>(message_expiry_interval_->count());
MQTTProperties_add(&message.properties, &property);
}

if (!content_type.empty()) {
MQTTProperty property;
property.identifier = MQTTPROPERTY_CODE_CONTENT_TYPE;
-property.value.data.len = content_type.length();
+property.value.data.len = gsl::narrow<int>(content_type.length());
property.value.data.data = const_cast<char*>(content_type.data());
MQTTProperties_add(&message.properties, &property);
}
@@ -237,11 +237,11 @@ void PublishMQTT::addAttributesAsUserProperties(MQTTAsync_message& message, cons
property.identifier = MQTTPROPERTY_CODE_USER_PROPERTY;

// key
-property.value.data.len = key.length();
+property.value.data.len = gsl::narrow<int>(key.length());
property.value.data.data = const_cast<char*>(key.data());

// value
-property.value.value.len = value.length();
+property.value.value.len = gsl::narrow<int>(value.length());
property.value.value.data = const_cast<char*>(value.data());

MQTTProperties_add(&message.properties, &property);
4 changes: 2 additions & 2 deletions extensions/pdh/PDHCounters.cpp
@@ -55,7 +55,7 @@ std::string PDHCounter::getCounterName() const {
}

void SinglePDHCounter::addToJson(rapidjson::Value& body, rapidjson::Document::AllocatorType& alloc) const {
-rapidjson::Value key(getCounterName().c_str(), getCounterName().length(), alloc);
+rapidjson::Value key(getCounterName().c_str(), gsl::narrow<rapidjson::SizeType>(getCounterName().length()), alloc);
rapidjson::Value& group_node = acquireNode(getObjectName(), body, alloc);
group_node.AddMember(key, getValue(), alloc);
}
@@ -91,7 +91,7 @@ void PDHCounterArray::addToJson(rapidjson::Value& body, rapidjson::Document::All
rapidjson::Value& counter_node = acquireNode(node_name, group_node, alloc);
rapidjson::Value value = getValue(i);
rapidjson::Value key;
-key.SetString(getCounterName().c_str(), getCounterName().length(), alloc);
+key.SetString(getCounterName().c_str(), gsl::narrow<rapidjson::SizeType>(getCounterName().length()), alloc);
counter_node.AddMember(key, value, alloc);
}
}
3 changes: 2 additions & 1 deletion extensions/python/tests/PythonManifestTests.cpp
@@ -25,6 +25,7 @@
#include "flow-tests/TestControllerWithFlow.h"
#include "EmptyFlow.h"
#include "c2/C2MetricsPublisher.h"
#include "utils/gsl.h"

using minifi::state::response::SerializedResponseNode;

@@ -40,7 +41,7 @@ const SerializedResponseNode& getNode(const std::vector<SerializedResponseNode>&
for (auto& node : nodes) {
if (node.name == name) return node;
}
-assert(false);
+gsl_FailFast();
}

TEST_CASE("Python processor's description is part of the manifest") {
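The assert(false) → gsl_FailFast() change above is likely also warning-driven: assert compiles away in Release builds (NDEBUG), leaving a value-returning function whose final path returns nothing, which MSVC reports as "not all control paths return a value". gsl_FailFast() is marked noreturn, so the dead path is explicit in every configuration. A small illustration with a hypothetical lookup function (assuming gsl_FailFast comes in via the project's "utils/gsl.h"):

#include <string_view>
#include <vector>

#include "utils/gsl.h"  // assumed to provide gsl_FailFast()

struct Node { std::string_view name; };

// With assert(false), Release builds see a path that falls off the end of a
// value-returning function; gsl_FailFast() terminates instead of returning.
const Node& getNode(const std::vector<Node>& nodes, std::string_view name) {
  for (const auto& node : nodes) {
    if (node.name == name) return node;
  }
  gsl_FailFast();
}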
3 changes: 2 additions & 1 deletion extensions/smb/tests/SmbConnectionControllerServiceTests.cpp
@@ -20,6 +20,7 @@
#include "Catch.h"
#include "SmbConnectionControllerService.h"
#include "utils/TempSmbShare.h"
#include "utils/UnicodeConversion.h"

namespace org::apache::nifi::minifi::extensions::smb::test {

@@ -49,7 +50,7 @@ TEST_CASE_METHOD(SmbConnectionControllerServiceFixture, "SmbConnectionController

SECTION("Valid share") {
plan_->setProperty(smb_connection_node_, SmbConnectionControllerService::Hostname, "localhost");
-plan_->setProperty(smb_connection_node_, SmbConnectionControllerService::Share, minifi::utils::OsUtils::wideStringToString(share_local_name));
+plan_->setProperty(smb_connection_node_, SmbConnectionControllerService::Share, minifi::utils::to_string(share_local_name));

REQUIRE_NOTHROW(plan_->finalize());

14 changes: 3 additions & 11 deletions extensions/sql/data/JSONSQLWriter.cpp
@@ -22,11 +22,7 @@
#include "rapidjson/prettywriter.h"
#include "Exception.h"

-namespace org {
-namespace apache {
-namespace nifi {
-namespace minifi {
-namespace sql {
+namespace org::apache::nifi::minifi::sql {

JSONSQLWriter::JSONSQLWriter(bool pretty, ColumnFilter column_filter)
: pretty_(pretty), current_batch_(rapidjson::kArrayType), column_filter_(std::move(column_filter)) {
Expand Down Expand Up @@ -83,7 +79,7 @@ void JSONSQLWriter::addToJSONRow(const std::string& column_name, rapidjson::Valu

rapidjson::Value JSONSQLWriter::toJSONString(const std::string& s) {
rapidjson::Value jsonValue;
-jsonValue.SetString(s.c_str(), s.size(), current_batch_.GetAllocator());
+jsonValue.SetString(s.c_str(), gsl::narrow<rapidjson::SizeType>(s.size()), current_batch_.GetAllocator());

return jsonValue;
}
@@ -102,8 +98,4 @@ std::string JSONSQLWriter::toString() {
return {buffer.GetString(), buffer.GetSize()};
}

-} // namespace sql
-} // namespace minifi
-} // namespace nifi
-} // namespace apache
-} // namespace org
+} // namespace org::apache::nifi::minifi::sql