Fix the type casting bug in labels (#221)
* Fixing CMake configs for APPLE

* Fix the type annotation error in classId

* Fix the version parameter in export_model.py

* Revert other types temporarily

* Update README.md for macOS support
zhiqwang authored Oct 30, 2021
1 parent b8d845d commit cc3259a
Showing 4 changed files with 35 additions and 14 deletions.
23 changes: 15 additions & 8 deletions deployment/onnxruntime/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.0.0)
+cmake_minimum_required(VERSION 3.18.0)
 project(yolort_onnx)
 
 option(ONNXRUNTIME_DIR "Path to built ONNX Runtime directory." STRING)
@@ -11,15 +11,22 @@ add_executable(yolort_onnx main.cpp)
 set(CMAKE_CXX_STANDARD 20)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 
-target_include_directories(yolort_onnx PRIVATE "${ONNXRUNTIME_DIR}/include")
-# link_directories("${ONNXRUNTIME_DIR}/lib")
+if (APPLE)
+    target_include_directories(yolort_onnx PRIVATE "${ONNXRUNTIME_DIR}/include/onnxruntime/core/session")
+else()
+    target_include_directories(yolort_onnx PRIVATE "${ONNXRUNTIME_DIR}/include")
+endif()
 
 target_link_libraries(yolort_onnx ${OpenCV_LIBS})
 
 if (WIN32)
-    target_link_libraries(yolort_onnx "${ONNXRUNTIME_DIR}/lib/onnxruntime.lib")
-endif(WIN32)
+    target_link_libraries(yolort_onnx "${ONNXRUNTIME_DIR}/lib/onnxruntime.lib")
+endif()
 
-if (UNIX)
-    target_link_libraries(yolort_onnx "${ONNXRUNTIME_DIR}/lib/libonnxruntime.so")
-endif(UNIX)
+if (APPLE)
+    target_link_libraries(yolort_onnx "${ONNXRUNTIME_DIR}/lib/libonnxruntime.dylib")
+endif()
+
+if (UNIX AND NOT APPLE)
+    target_link_libraries(yolort_onnx "${ONNXRUNTIME_DIR}/lib/libonnxruntime.so")
+endif()
5 changes: 2 additions & 3 deletions deployment/onnxruntime/README.md
@@ -4,7 +4,7 @@ The ONNXRuntime inference for `yolort`, both GPU and CPU are supported.
 
 ## Dependencies
 
-- Ubuntu 20.04 / Windows 10
+- Ubuntu 20.04 / Windows 10 / macOS
 - ONNXRuntime 1.7 +
 - OpenCV 4.5 +
 - CUDA 11 \[Optional\]
@@ -40,7 +40,6 @@ The `ONNX` model exported with `yolort` differs from the official one in the fol
 
 ```bash
 python tools/export_model.py [--checkpoint_path path/to/custom/best.pt]
-                             [--simplify]
 ```
 
 And then, you can find that a new pair of ONNX models ("best.onnx" and "best.sim.onnx") has been generated in the directory of "best.pt".
@@ -50,7 +49,7 @@ The `ONNX` model exported with `yolort` differs from the official one in the fol
 
 ```python
 from yolort.runtime import PredictorORT
-detector = PredictorORT("best.sim.onnx")
+detector = PredictorORT("best.onnx")
 img_path = "bus.jpg"
 scores, class_ids, boxes = detector.run_on_image(img_path)
 ```
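For context on the updated README snippet, below is a minimal sketch of consuming the detector's outputs. It assumes "best.onnx" was exported as described above, that "bus.jpg" is available locally, and that `run_on_image` returns NumPy arrays in which `class_ids` now carries integer (int64) labels rather than floats.

```python
import numpy as np

from yolort.runtime import PredictorORT

# Assumes "best.onnx" was produced by tools/export_model.py as shown in the README diff above.
detector = PredictorORT("best.onnx")
scores, class_ids, boxes = detector.run_on_image("bus.jpg")

# With integer labels, class_ids can index a label map directly,
# without the float-to-int rounding the old behavior required.
for score, class_id, box in zip(scores, class_ids, boxes):
    print(f"label {int(class_id)}: score {float(score):.3f}, box {np.round(box, 1)}")
```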
2 changes: 1 addition & 1 deletion deployment/onnxruntime/main.cpp
@@ -147,7 +147,7 @@ void YOLOv5Detector::preprocessing(cv::Mat &image, float* blob)
 std::vector<Detection> YOLOv5Detector::postprocessing(cv::Mat& image, std::vector<Ort::Value>& outputTensors)
 {
     const auto* scoresTensor = outputTensors[0].GetTensorData<float>();
-    const auto* classIdsTensor = outputTensors[1].GetTensorData<float>();
+    const auto* classIdsTensor = outputTensors[1].GetTensorData<int64_t>();
     const auto* boxesTensor = outputTensors[2].GetTensorData<float>();
 
     size_t count = outputTensors[0].GetTensorTypeAndShapeInfo().GetElementCount();
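The one-line C++ change above matches the exported graph's output types: the "labels" output is an int64 tensor, so reading it through `GetTensorData<float>()` reinterprets the raw bytes instead of yielding valid class ids. A quick way to confirm the output signature from Python, assuming onnxruntime is installed and "best.onnx" is the exported model, is sketched below.

```python
import onnxruntime as ort

# Print the output signature of the exported model; the names
# ("scores", "labels", "boxes") are the output_names set in tools/export_model.py.
session = ort.InferenceSession("best.onnx", providers=["CPUExecutionProvider"])
for output in session.get_outputs():
    print(output.name, output.type, output.shape)

# Expected (assumption, consistent with this commit's fix):
#   scores tensor(float)
#   labels tensor(int64)
#   boxes  tensor(float)
```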
19 changes: 17 additions & 2 deletions tools/export_model.py
@@ -33,6 +33,13 @@ def get_parser():
         type=float,
         help="Score threshold used for postprocessing the detections.",
     )
+    parser.add_argument(
+        "--version",
+        type=str,
+        default="r6.0",
+        help="Upstream version released by the ultralytics/yolov5, Possible "
+        "values are ['r3.1', 'r4.0', 'r6.0']. Default: 'r6.0'.",
+    )
     parser.add_argument(
         "--export_friendly",
         action="store_true",
@@ -132,7 +139,11 @@ def cli_main():
         }
         input_names = ["images_tensors"]
         output_names = ["scores", "labels", "boxes"]
-        model = YOLO.load_from_yolov5(checkpoint_path, score_thresh=args.score_thresh)
+        model = YOLO.load_from_yolov5(
+            checkpoint_path,
+            score_thresh=args.score_thresh,
+            version=args.version,
+        )
         model.eval()
     else:
         # input data
@@ -146,7 +157,11 @@ def cli_main():
         }
         input_names = ["images_tensors"]
         output_names = ["scores", "labels", "boxes"]
-        model = YOLOv5.load_from_yolov5(checkpoint_path, score_thresh=args.score_thresh)
+        model = YOLOv5.load_from_yolov5(
+            checkpoint_path,
+            score_thresh=args.score_thresh,
+            version=args.version,
+        )
         model.eval()
 
     # export ONNX models
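As a standalone illustration of the new flag (a sketch, not the repository's code): the same argument could be constrained to the documented releases with argparse `choices`, whereas the script above accepts a free-form string and only lists the valid values in its help text.

```python
import argparse


def get_parser():
    parser = argparse.ArgumentParser(description="Sketch of the --version flag added in this commit")
    parser.add_argument(
        "--version",
        type=str,
        default="r6.0",
        # Illustrative tightening: the actual script documents these values only in its help text.
        choices=["r3.1", "r4.0", "r6.0"],
        help="Upstream release of ultralytics/yolov5 that the checkpoint was trained with.",
    )
    return parser


if __name__ == "__main__":
    args = get_parser().parse_args()
    print(f"Exporting against upstream yolov5 release: {args.version}")
```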
