From ee95a67fef651cb9ac7c59c9c13722312e527f1f Mon Sep 17 00:00:00 2001
From: Jacob Szwejbka
Date: Thu, 16 Oct 2025 12:32:39 -0700
Subject: [PATCH] Remove ET_UNWRAP usage (#15200)

Summary: ET_UNWRAP is built on GNU statement expressions, which MSVC does
not support, so reintroduce the minor boilerplate at each call site.

Differential Revision: D84844855
---
 .../llm/runner/multimodal_decoder_runner.h    |  16 ++-
 extension/llm/runner/multimodal_prefiller.cpp |  80 +++++++----
 extension/llm/runner/multimodal_runner.cpp    |  31 ++++-
 extension/llm/runner/text_decoder_runner.cpp  |  14 +-
 extension/llm/runner/text_llm_runner.cpp      |  19 ++-
 extension/llm/runner/text_prefiller.cpp       |  13 +-
 extension/llm/runner/text_token_generator.h   |  11 +-
 extension/llm/runner/util.h                   |  12 +-
 extension/module/module.cpp                   | 127 ++++++++++++------
 extension/module/module.h                     |   6 +-
 runtime/core/test/error_handling_test.cpp     |   6 +-
 11 files changed, 239 insertions(+), 96 deletions(-)

diff --git a/extension/llm/runner/multimodal_decoder_runner.h b/extension/llm/runner/multimodal_decoder_runner.h
index c8db3e57000..5773e5ca909 100644
--- a/extension/llm/runner/multimodal_decoder_runner.h
+++ b/extension/llm/runner/multimodal_decoder_runner.h
@@ -28,8 +28,12 @@ class ET_EXPERIMENTAL MultimodalDecoderRunner
       executorch::extension::TensorPtr& tokens,
       int64_t start_pos) override {
     // run token embedding
-    auto token_embedding_outputs =
-        ET_UNWRAP(module_->execute(kTokenEmbeddingMethod, tokens));
+    auto token_embedding_result =
+        module_->execute(kTokenEmbeddingMethod, tokens);
+    if (!token_embedding_result.ok()) {
+      return token_embedding_result.error();
+    }
+    auto token_embedding_outputs = std::move(*token_embedding_result);
 
     // Return the logits tensor
     return decode(token_embedding_outputs[0], start_pos);
@@ -47,8 +51,12 @@ class ET_EXPERIMENTAL MultimodalDecoderRunner
     auto start_pos_tensor = ::executorch::extension::from_blob(
         &start_pos, {1}, executorch::aten::ScalarType::Long);
     // run text model
-    auto outputs_res = ET_UNWRAP(
-        module_->execute(kTextModelMethod, {embeddings, start_pos_tensor}));
+    auto outputs_result =
+        module_->execute(kTextModelMethod, {embeddings, start_pos_tensor});
+    if (!outputs_result.ok()) {
+      return outputs_result.error();
+    }
+    auto outputs_res = std::move(*outputs_result);
 
     ET_CHECK_MSG(
         outputs_res.size() == 1,
diff --git a/extension/llm/runner/multimodal_prefiller.cpp b/extension/llm/runner/multimodal_prefiller.cpp
index 7f5a8356979..8578187128f 100644
--- a/extension/llm/runner/multimodal_prefiller.cpp
+++ b/extension/llm/runner/multimodal_prefiller.cpp
@@ -42,18 +42,21 @@ Result<uint64_t> MultimodalPrefiller::prefill(
   if (input.is_image()) {
     const Image& image = input.get_image();
 
-    auto method_meta = ET_UNWRAP(
-        module_->method_meta(kVisionEncoderMethod),
+    auto method_meta_result = module_->method_meta(kVisionEncoderMethod);
+    ET_CHECK_OK_OR_RETURN_ERROR(
+        method_meta_result.error(),
         "Failed to get method_meta for %s",
         kVisionEncoderMethod);
+    auto method_meta = method_meta_result.get();
 
     ET_CHECK_OR_RETURN_ERROR(
         method_meta.num_inputs() > 0,
         InvalidArgument,
         "Image encoder should have at least 1 input");
-    auto input_meta = ET_UNWRAP(
-        method_meta.input_tensor_meta(0),
-        "Cannot get input tensor meta at index 0");
+    auto input_meta_result = method_meta.input_tensor_meta(0);
+    ET_CHECK_OK_OR_RETURN_ERROR(
+        input_meta_result.error(), "Cannot get input tensor meta at index 0");
+    auto input_meta = input_meta_result.get();
 
     auto expected_dtype = input_meta.scalar_type();
     if (expected_dtype == ::executorch::aten::ScalarType::Float) {
@@ -77,47 +80,58 @@ Result<uint64_t> MultimodalPrefiller::prefill(
     // The model might expect a 4D tensor (NCHW), but toTensor() returns a 3D
     // tensor (CHW). Add a batch dimension of 1 if needed.
     auto expected_dims = input_meta.sizes();
-    auto image_tensor = ET_UNWRAP(
-        image.toTensor(/*with_batch*/ expected_dims.size() == 4),
-        "Failed to convert image to tensor");
+    auto image_tensor_result =
+        image.toTensor(/*with_batch*/ expected_dims.size() == 4);
+    ET_CHECK_OK_OR_RETURN_ERROR(
+        image_tensor_result.error(), "Failed to convert image to tensor");
+    auto image_tensor = image_tensor_result.get();
     ET_LOG(
         Info,
         "Image tensor dim: %zu, dtype: %s",
         image_tensor->dim(),
         ::executorch::runtime::toString(image_tensor->scalar_type()));
 
     // Run image encoder
-    auto image_encoder_outputs =
-        ET_UNWRAP(module_->execute(kVisionEncoderMethod, image_tensor));
+    auto image_encoder_result =
+        module_->execute(kVisionEncoderMethod, image_tensor);
+    ET_CHECK_OK_OR_RETURN_ERROR(image_encoder_result.error());
+    auto image_encoder_outputs = image_encoder_result.get();
 
     encoder_output = image_encoder_outputs[0];
   } else if (input.is_audio()) {
     const Audio& audio = input.get_audio();
 
-    auto method_meta = ET_UNWRAP(
-        module_->method_meta(kAudioEncoderMethod),
+    auto method_meta_result = module_->method_meta(kAudioEncoderMethod);
+    ET_CHECK_OK_OR_RETURN_ERROR(
+        method_meta_result.error(),
         "Failed to get method_meta for %s",
         kAudioEncoderMethod);
+    auto method_meta = method_meta_result.get();
 
     ET_CHECK_OR_RETURN_ERROR(
         method_meta.num_inputs() > 0,
         InvalidArgument,
         "Audio encoder should have at least 1 input");
-    auto input_meta = ET_UNWRAP(
-        method_meta.input_tensor_meta(0),
-        "Cannot get input tensor meta at index 0");
+    auto input_meta_result = method_meta.input_tensor_meta(0);
+    ET_CHECK_OK_OR_RETURN_ERROR(
+        input_meta_result.error(), "Cannot get input tensor meta at index 0");
+    auto input_meta = input_meta_result.get();
 
     auto expected_dtype = input_meta.scalar_type();
 
     // Create tensor with original dtype
-    auto audio_tensor =
-        ET_UNWRAP(audio.toTensor(), "Failed to convert audio to tensor");
+    auto audio_tensor_result = audio.toTensor();
+    ET_CHECK_OK_OR_RETURN_ERROR(
+        audio_tensor_result.error(), "Failed to convert audio to tensor");
+    auto audio_tensor = audio_tensor_result.get();
 
     // Convert to expected dtype if needed
     if (audio_tensor->scalar_type() != expected_dtype) {
       if (expected_dtype == ::executorch::aten::ScalarType::BFloat16) {
         // Convert to bfloat16
-        audio_tensor = ET_UNWRAP(
-            convert_to_bfloat16(audio_tensor),
+        auto convert_result = convert_to_bfloat16(audio_tensor);
+        ET_CHECK_OK_OR_RETURN_ERROR(
+            convert_result.error(),
             "Failed to convert audio tensor to bfloat16");
+        audio_tensor = convert_result.get();
       } else {
         ET_CHECK_OR_RETURN_ERROR(
             false,
@@ -147,7 +161,15 @@ Result<uint64_t> MultimodalPrefiller::prefill(
     std::vector<uint64_t> tokens;
     if (input.is_text()) {
       auto& text = input.get_text();
-      tokens = ET_UNWRAP_TOKENIZER(tokenizer_->encode(text));
+      auto encode_result = tokenizer_->encode(text);
+      if (!encode_result.ok()) {
+        ET_LOG(
+            Error,
+            "Tokenizers error code %d",
+            static_cast<int>(encode_result.error()));
+        return ::executorch::runtime::Error::InvalidArgument;
+      }
+      tokens = std::move(*encode_result);
     } else {
       tokens = input.get_tokens();
     }
@@ -158,8 +180,10 @@ Result<uint64_t> MultimodalPrefiller::prefill(
         ::executorch::aten::ScalarType::Long);
 
     // Run text encoder (token embeddings)
-    auto token_embedding_outputs =
-        ET_UNWRAP(module_->execute(kTokenEmbeddingMethod, text_tensor));
+    auto token_embedding_result =
+        module_->execute(kTokenEmbeddingMethod, text_tensor);
+    ET_CHECK_OK_OR_RETURN_ERROR(token_embedding_result.error());
+    auto token_embedding_outputs = token_embedding_result.get();
 
     encoder_output = token_embedding_outputs[0];
   } else {
@@ -180,8 +204,10 @@ Result<uint64_t> MultimodalPrefiller::prefill(
   }
 
   std::vector<int64_t> cache_positions;
-  auto cache_position_tensor = ET_UNWRAP(populate_start_pos_or_cache_position(
-      module_, start_pos, cache_positions, seq_len, kTextModelMethod));
+  auto cache_position_result = populate_start_pos_or_cache_position(
+      module_, start_pos, cache_positions, seq_len, kTextModelMethod);
+  ET_CHECK_OK_OR_RETURN_ERROR(cache_position_result.error());
+  auto cache_position_tensor = cache_position_result.get();
 
   auto prefill_result = module_->execute(
       kTextModelMethod, {encoder_output, cache_position_tensor});
@@ -217,8 +243,10 @@ ::executorch::runtime::Error MultimodalPrefiller::load() {
   ET_CHECK_OK_OR_RETURN_ERROR(module_->load_method(kTokenEmbeddingMethod));
   ET_CHECK_OK_OR_RETURN_ERROR(module_->load_method(kTextModelMethod));
 
-  std::unordered_set<std::string> methods =
-      ET_UNWRAP(module_->method_names(), "Failed to get method names");
+  auto method_names_result = module_->method_names();
+  ET_CHECK_OK_OR_RETURN_ERROR(
+      method_names_result.error(), "Failed to get method names");
+  std::unordered_set<std::string> methods = method_names_result.get();
 
   // Load image_encoder method if exists.
   if (methods.find(kVisionEncoderMethod) != methods.end()) {
diff --git a/extension/llm/runner/multimodal_runner.cpp b/extension/llm/runner/multimodal_runner.cpp
index 8b7e4e315d8..047ca27ee2b 100644
--- a/extension/llm/runner/multimodal_runner.cpp
+++ b/extension/llm/runner/multimodal_runner.cpp
@@ -67,7 +67,10 @@ Error MultimodalRunner::prefill(const std::vector<MultimodalInput>& inputs) {
     ET_CHECK_OK_OR_RETURN_ERROR(load());
   }
   for (auto& input : inputs) {
-    ET_UNWRAP(multimodal_prefiller_->prefill(input, pos_));
+    auto prefill_result = multimodal_prefiller_->prefill(input, pos_);
+    if (!prefill_result.ok()) {
+      return prefill_result.error();
+    }
   }
   return Error::Ok;
 }
@@ -125,15 +128,27 @@ Error MultimodalRunner::generate(
     if (config.echo && i == inputs.size() - 1 && input.is_text()) {
       wrapped_callback(input.get_text());
     }
-    prefill_next_token = ET_UNWRAP(multimodal_prefiller_->prefill(input, pos_));
+    auto prefill_result = multimodal_prefiller_->prefill(input, pos_);
+    if (!prefill_result.ok()) {
+      return prefill_result.error();
+    }
+    prefill_next_token = prefill_result.get();
   }
 
   stats_->first_token_ms = time_in_ms();
   stats_->prompt_eval_end_ms = time_in_ms();
   stats_->num_prompt_tokens = pos_;
 
-  wrapped_callback(ET_UNWRAP_TOKENIZER(
-      tokenizer_->decode(prefill_next_token, prefill_next_token)));
+  auto decode_result =
+      tokenizer_->decode(prefill_next_token, prefill_next_token);
+  if (!decode_result.ok()) {
+    ET_LOG(
+        Error,
+        "Tokenizers error code %d",
+        static_cast<int>(decode_result.error()));
+    return Error::InvalidArgument;
+  }
+  wrapped_callback(std::move(*decode_result));
 
   RUNNER_ET_LOG(
       config.warming,
@@ -160,13 +175,17 @@ Error MultimodalRunner::generate(
 
   // Generate tokens using the text token generator
   std::vector<uint64_t> prompt_tokens = {prefill_next_token};
-  int64_t num_generated_tokens = ET_UNWRAP(text_token_generator_->generate(
+  auto generate_result = text_token_generator_->generate(
       /*tokens=*/prompt_tokens,
      /*start_pos=*/pos_,
      /*max_new_tokens=*/max_new_tokens - 1, // Subtract 1 because prefill
                                             // already generated 1 token
      /*temperature=*/config.temperature,
-      /*token_callback=*/wrapped_callback));
+      /*token_callback=*/wrapped_callback);
+  if (!generate_result.ok()) {
+    return generate_result.error();
+  }
+  int64_t num_generated_tokens = generate_result.get();
 
   pos_ += num_generated_tokens;
   // Update stats
diff --git a/extension/llm/runner/text_decoder_runner.cpp b/extension/llm/runner/text_decoder_runner.cpp
index 7cd7623f58f..8d51736ace5 100644
--- a/extension/llm/runner/text_decoder_runner.cpp
+++ b/extension/llm/runner/text_decoder_runner.cpp
@@ -32,15 +32,23 @@ ::executorch::runtime::Result<executorch::aten::Tensor> TextDecoderRunner::step(
     TensorPtr& tokens,
     int64_t start_pos) {
   // ET_LOG(Info, "Input token %" PRIu64, input_token);
-  auto method_meta = ET_UNWRAP(module_->method_meta("forward"));
+  auto method_meta_result = module_->method_meta("forward");
+  if (!method_meta_result.ok()) {
+    return method_meta_result.error();
+  }
+  auto method_meta = std::move(*method_meta_result);
   // If only 1 input, we are not using kv cache
   bool use_kv_cache = method_meta.num_inputs() > 1;
 
   std::vector<int64_t> cache_positions;
   if (use_kv_cache) {
-    auto start_pos_tensor = ET_UNWRAP(populate_start_pos_or_cache_position(
-        module_, start_pos, cache_positions, tokens->numel(), "forward"));
+    auto start_pos_tensor_result = populate_start_pos_or_cache_position(
+        module_, start_pos, cache_positions, tokens->numel(), "forward");
+    if (!start_pos_tensor_result.ok()) {
+      return start_pos_tensor_result.error();
+    }
+    auto start_pos_tensor = std::move(*start_pos_tensor_result);
 
     std::vector<runtime::EValue> inputs;
     auto inputs_res = io_manager_->prepare_decode(tokens, start_pos_tensor);
diff --git a/extension/llm/runner/text_llm_runner.cpp b/extension/llm/runner/text_llm_runner.cpp
index 333716ac831..4e0bccdb781 100644
--- a/extension/llm/runner/text_llm_runner.cpp
+++ b/extension/llm/runner/text_llm_runner.cpp
@@ -170,8 +170,15 @@ Error TextLLMRunner::generate(
   stats_->prompt_eval_end_ms = time_in_ms();
 
   // print the first token from prefill. No prev_token so use cur_token for it.
-  wrapped_callback(
-      ET_UNWRAP_TOKENIZER(tokenizer_->decode(cur_token, cur_token)));
+  auto decode_result = tokenizer_->decode(cur_token, cur_token);
+  if (!decode_result.ok()) {
+    ET_LOG(
+        Error,
+        "Tokenizers error code %d",
+        static_cast<int>(decode_result.error()));
+    return ::executorch::runtime::Error::InvalidArgument;
+  }
+  wrapped_callback(std::move(*decode_result));
   RUNNER_ET_LOG(
       config.warming,
       "RSS after prompt prefill: %f MiB (0 if unsupported)",
@@ -181,12 +188,16 @@ Error TextLLMRunner::generate(
   prompt_tokens.push_back(cur_token);
 
   // Generate max_new_tokens - 1 because prefill already generated 1 token.
-  int64_t num_generated_tokens = ET_UNWRAP(text_token_generator_->generate(
+  auto generate_result = text_token_generator_->generate(
       prompt_tokens,
       num_prompt_tokens,
       max_new_tokens - 1,
       temperature_ == -1.0f ? config.temperature : temperature_,
-      wrapped_callback));
+      wrapped_callback);
+  if (!generate_result.ok()) {
+    return generate_result.error();
+  }
+  int64_t num_generated_tokens = generate_result.get();
 
   stats_->inference_end_ms = time_in_ms();
   if (!config.warming) {
diff --git a/extension/llm/runner/text_prefiller.cpp b/extension/llm/runner/text_prefiller.cpp
index de092b6b05d..063ae822489 100644
--- a/extension/llm/runner/text_prefiller.cpp
+++ b/extension/llm/runner/text_prefiller.cpp
@@ -105,8 +105,11 @@ ::executorch::runtime::Result<uint64_t> TextPrefiller::prefill_chunk(
 
   // run the first token and get back logits tensor. Assuming the first token
   // is bos so don't callback.
-  auto logits_tensor =
-      ET_UNWRAP(text_decoder_runner_->step(tokens, start_pos));
+  auto logits_result = text_decoder_runner_->step(tokens, start_pos);
+  if (!logits_result.ok()) {
+    return logits_result.error();
+  }
+  auto logits_tensor = std::move(*logits_result);
 
   pos += 1; // start the loop from index 1
   start_pos += 1;
@@ -116,7 +119,11 @@ ::executorch::runtime::Result<uint64_t> TextPrefiller::prefill_chunk(
     // NOLINTNEXTLINE(facebook-hte-ParameterUncheckedArrayBounds)
     cur_token = prompt_tokens[pos];
 
-    logits_tensor = ET_UNWRAP(text_decoder_runner_->step(tokens, start_pos));
+    auto step_result = text_decoder_runner_->step(tokens, start_pos);
+    if (!step_result.ok()) {
+      return step_result.error();
+    }
+    logits_tensor = std::move(*step_result);
 
     pos++;
     start_pos++;
diff --git a/extension/llm/runner/text_token_generator.h b/extension/llm/runner/text_token_generator.h
index a57961ee1d2..b7fca420bc3 100644
--- a/extension/llm/runner/text_token_generator.h
+++ b/extension/llm/runner/text_token_generator.h
@@ -110,8 +110,15 @@ class ET_EXPERIMENTAL TextTokenGenerator {
       }
 
       // print the token as string, decode it with the Tokenizer object
-      token_callback(
-          ET_UNWRAP_TOKENIZER(tokenizer_->decode(prev_token, cur_token)));
+      auto decode_result = tokenizer_->decode(prev_token, cur_token);
+      if (!decode_result.ok()) {
+        ET_LOG(
+            Error,
+            "Tokenizers error code %d",
+            static_cast<int>(decode_result.error()));
+        return ::executorch::runtime::Error::InvalidArgument;
+      }
+      token_callback(std::move(*decode_result));
 
       if (should_stop_) {
         break;
diff --git a/extension/llm/runner/util.h b/extension/llm/runner/util.h
index ec08ecfb647..e87d625f140 100644
--- a/extension/llm/runner/util.h
+++ b/extension/llm/runner/util.h
@@ -116,8 +116,16 @@ inline runtime::Result<TensorPtr> populate_start_pos_or_cache_position(
     const char* method_name = "forward") {
   // Get expected shape of cache position tensor, which should be the second
   // argument
-  auto method_meta = ET_UNWRAP(module->method_meta(method_name));
-  auto second_input_info = ET_UNWRAP(method_meta.input_tensor_meta(1));
+  auto method_meta_result = module->method_meta(method_name);
+  if (!method_meta_result.ok()) {
+    return method_meta_result.error();
+  }
+  auto method_meta = std::move(*method_meta_result);
+  auto second_input_info_result = method_meta.input_tensor_meta(1);
+  if (!second_input_info_result.ok()) {
+    return second_input_info_result.error();
+  }
+  auto second_input_info = std::move(*second_input_info_result);
   auto second_input_sizes = second_input_info.sizes();
   auto numel = second_input_sizes[0];
 
diff --git a/extension/module/module.cpp b/extension/module/module.cpp
index 9de77bcbc79..35228d06729 100644
--- a/extension/module/module.cpp
+++ b/extension/module/module.cpp
@@ -15,26 +15,6 @@
 #include <executorch/extension/data_loader/file_data_loader.h>
 #include <executorch/extension/data_loader/mmap_data_loader.h>
 
-/**
- * Unwrap a Result to obtain its value (direct object, not a pointer).
- * If the Result contains an error, propagate the error via trivial function
- * return. The macro wraps the object into a unique_ptr.
- *
- * Note: A function using ET_UNWRAP_UNIQUE should itself return a Result or
- * Error.
- *
- * @param[in] result__ Expression yielding the result to unwrap.
- */
-#define ET_UNWRAP_UNIQUE(result__)                                     \
-  ({                                                                   \
-    auto et_result__ = (result__);                                     \
-    if (!et_result__.ok()) {                                           \
-      return et_result__.error();                                      \
-    }                                                                  \
-    std::make_unique<std::remove_reference_t<decltype(*et_result__)>>( \
-        std::move(*et_result__));                                      \
-  })
-
 namespace executorch {
 namespace extension {
 namespace ET_MODULE_NAMESPACE {
@@ -49,21 +29,47 @@ runtime::Result<std::unique_ptr<runtime::DataLoader>> make_data_loader(
     Module::LoadMode mode) {
   std::unique_ptr<runtime::DataLoader> data_loader;
   switch (mode) {
-    case Module::LoadMode::File:
-      data_loader = ET_UNWRAP_UNIQUE(FileDataLoader::from(file_path.c_str()));
+    case Module::LoadMode::File: {
+      auto res = FileDataLoader::from(file_path.c_str());
+      if (!res.ok()) {
+        return res.error();
+      }
+      data_loader = std::make_unique<std::remove_reference_t<decltype(*res)>>(
+          std::move(*res));
       break;
-    case Module::LoadMode::Mmap:
-      data_loader = ET_UNWRAP_UNIQUE(MmapDataLoader::from(
-          file_path.c_str(), MmapDataLoader::MlockConfig::NoMlock));
+    }
+    case Module::LoadMode::Mmap: {
+      auto res_mmap = MmapDataLoader::from(
+          file_path.c_str(), MmapDataLoader::MlockConfig::NoMlock);
+      if (!res_mmap.ok()) {
+        return res_mmap.error();
+      }
+      data_loader =
+          std::make_unique<std::remove_reference_t<decltype(*res_mmap)>>(
+              std::move(*res_mmap));
       break;
-    case Module::LoadMode::MmapUseMlock:
-      data_loader = ET_UNWRAP_UNIQUE(MmapDataLoader::from(file_path.c_str()));
+    }
+    case Module::LoadMode::MmapUseMlock: {
+      auto res_mlock = MmapDataLoader::from(file_path.c_str());
+      if (!res_mlock.ok()) {
+        return res_mlock.error();
+      }
+      data_loader =
+          std::make_unique<std::remove_reference_t<decltype(*res_mlock)>>(
+              std::move(*res_mlock));
       break;
-    case Module::LoadMode::MmapUseMlockIgnoreErrors:
-      data_loader = ET_UNWRAP_UNIQUE(MmapDataLoader::from(
-          file_path.c_str(),
-          MmapDataLoader::MlockConfig::UseMlockIgnoreErrors));
+    }
+    case Module::LoadMode::MmapUseMlockIgnoreErrors: {
+      auto res_mlock_ignore = MmapDataLoader::from(
+          file_path.c_str(), MmapDataLoader::MlockConfig::UseMlockIgnoreErrors);
+      if (!res_mlock_ignore.ok()) {
+        return res_mlock_ignore.error();
+      }
+      data_loader = std::make_unique<
+          std::remove_reference_t<decltype(*res_mlock_ignore)>>(
+          std::move(*res_mlock_ignore));
       break;
+    }
   }
   return data_loader;
 }
@@ -154,19 +160,33 @@ Module::Module(
 runtime::Error Module::load(const Program::Verification verification) {
   if (!is_loaded()) {
     if (!data_loader_) {
-      data_loader_ = ET_UNWRAP(make_data_loader(file_path_, load_mode_));
+      auto data_loader_result = make_data_loader(file_path_, load_mode_);
+      if (!data_loader_result.ok()) {
+        return data_loader_result.error();
+      }
+      data_loader_ = std::move(*data_loader_result);
     }
     if (data_files_.size() > 0) {
       for (const auto& data_file : data_files_) {
-        data_map_loaders_.push_back(
-            ET_UNWRAP(make_data_loader(data_file, load_mode_)));
+        auto data_map_loader_result = make_data_loader(data_file, load_mode_);
+        if (!data_map_loader_result.ok()) {
+          return data_map_loader_result.error();
+        }
+        data_map_loaders_.push_back(std::move(*data_map_loader_result));
       }
     }
 
     if (data_map_loaders_.size() > 0) {
       for (auto i = 0; i < data_map_loaders_.size(); ++i) {
-        named_data_maps_.push_back(ET_UNWRAP_UNIQUE(
-            FlatTensorDataMap::load(data_map_loaders_[i].get())));
+        auto res_flat_tensor =
+            FlatTensorDataMap::load(data_map_loaders_[i].get());
+        if (!res_flat_tensor.ok()) {
+          return res_flat_tensor.error();
+        }
+        named_data_maps_.push_back(
+            std::make_unique<
+                std::remove_reference_t<decltype(*res_flat_tensor)>>(
+                std::move(*res_flat_tensor)));
       }
 
       // Extract raw pointers from unique_ptrs to pass to MergedDataMap::load()
@@ -175,13 +195,23 @@ runtime::Error Module::load(const Program::Verification verification) {
      for (const auto& data_map : named_data_maps_) {
        raw_data_maps.push_back(data_map.get());
      }
-      merged_data_map_ = ET_UNWRAP_UNIQUE(
-          MergedDataMap::load(runtime::Span<const runtime::NamedDataMap*>(
-              raw_data_maps.data(), raw_data_maps.size())));
+      auto res_merged =
+          MergedDataMap::load(runtime::Span<const runtime::NamedDataMap*>(
+              raw_data_maps.data(), raw_data_maps.size()));
+      if (!res_merged.ok()) {
+        return res_merged.error();
+      }
+      merged_data_map_ =
+          std::make_unique<std::remove_reference_t<decltype(*res_merged)>>(
+              std::move(*res_merged));
     }
 
+    auto res_program = Program::load(data_loader_.get(), verification);
+    if (!res_program.ok()) {
+      return res_program.error();
+    }
     auto program =
-        ET_UNWRAP_UNIQUE(Program::load(data_loader_.get(), verification));
+        std::make_unique<std::remove_reference_t<decltype(*res_program)>>(
+            std::move(*res_program));
     program_ = std::shared_ptr<Program>(
         program.release(), [](Program* pointer) { delete pointer; });
   }
@@ -215,8 +245,11 @@ runtime::Error Module::load_method(
     MethodHolder method_holder;
 
     if (!planned_memory) {
-      const auto method_metadata =
-          ET_UNWRAP(program_->method_meta(method_name.c_str()));
+      auto method_metadata_result = program_->method_meta(method_name.c_str());
+      if (!method_metadata_result.ok()) {
+        return method_metadata_result.error();
+      }
+      const auto method_metadata = std::move(*method_metadata_result);
       const auto planned_buffers_count =
           method_metadata.num_memory_planned_buffers();
       method_holder.planned_buffers.reserve(planned_buffers_count);
@@ -237,11 +270,17 @@ runtime::Error Module::load_method(
     }
     method_holder.memory_manager = std::make_unique<runtime::MemoryManager>(
         memory_allocator_.get(), planned_memory, temp_allocator_.get());
-    method_holder.method = ET_UNWRAP_UNIQUE(program_->load_method(
+    auto res_method = program_->load_method(
         method_name.c_str(),
         method_holder.memory_manager.get(),
         event_tracer ? event_tracer : this->event_tracer(),
-        merged_data_map_.get()));
+        merged_data_map_.get());
+    if (!res_method.ok()) {
+      return res_method.error();
+    }
+    method_holder.method =
+        std::make_unique<std::remove_reference_t<decltype(*res_method)>>(
+            std::move(*res_method));
     methods_.emplace(method_name, std::move(method_holder));
   }
   return runtime::Error::Ok;
diff --git a/extension/module/module.h b/extension/module/module.h
index 207de768991..e523f163317 100644
--- a/extension/module/module.h
+++ b/extension/module/module.h
@@ -348,7 +348,11 @@ class Module {
   ET_NODISCARD inline runtime::Result<runtime::EValue> get(
       const std::string& method_name,
       const std::vector<runtime::EValue>& input_values) {
-    auto result = ET_UNWRAP(execute(method_name, input_values));
+    auto execute_result = execute(method_name, input_values);
+    if (!execute_result.ok()) {
+      return execute_result.error();
+    }
+    auto result = std::move(*execute_result);
     if (result.empty()) {
       return runtime::Error::InvalidArgument;
     }
diff --git a/runtime/core/test/error_handling_test.cpp b/runtime/core/test/error_handling_test.cpp
index ef270cad1ed..c1d48c86b89 100644
--- a/runtime/core/test/error_handling_test.cpp
+++ b/runtime/core/test/error_handling_test.cpp
@@ -29,7 +29,11 @@ Result<uint64_t> get_abs(int64_t num) {
 }
 
 Result<const char*> get_op_name(int64_t op) {
-  auto unsigned_op = ET_UNWRAP(get_abs(op));
+  auto abs_result = get_abs(op);
+  if (!abs_result.ok()) {
+    return abs_result.error();
+  }
+  auto unsigned_op = abs_result.get();
   switch (unsigned_op) {
     case 0:
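
Note on the MSVC incompatibility behind this patch: the ET_UNWRAP family
expands to a GNU statement expression ("({ ... })"), a GCC/Clang extension
that MSVC does not implement, which is why every call site above reverts to
explicit Result unwrapping. The self-contained sketch below illustrates the
trade-off; MY_UNWRAP and parse are hypothetical stand-ins rather than
ExecuTorch APIs, with MY_UNWRAP modeled on the ET_UNWRAP_UNIQUE macro
removed above.

#include <executorch/runtime/core/result.h>

#include <utility>

using executorch::runtime::Error;
using executorch::runtime::Result;

// Toy helper for illustration only: "parses" a single leading digit.
Result<int> parse(const char* s) {
  if (s == nullptr || *s < '0' || *s > '9') {
    return Error::InvalidArgument;
  }
  return static_cast<int>(*s - '0');
}

// Non-portable: the macro body is a GNU statement expression, so this
// compiles under GCC/Clang but is rejected by MSVC.
#define MY_UNWRAP(result__)        \
  ({                               \
    auto et_result__ = (result__); \
    if (!et_result__.ok()) {       \
      return et_result__.error();  \
    }                              \
    std::move(*et_result__);       \
  })

Result<int> twice_gnu_only(const char* s) {
  int value = MY_UNWRAP(parse(s)); // fails to compile under MSVC
  return value * 2;
}

// Portable: the explicit boilerplate this patch reintroduces everywhere.
Result<int> twice_portable(const char* s) {
  auto res = parse(s);
  if (!res.ok()) {
    return res.error();
  }
  return *res * 2;
}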