-
Notifications
You must be signed in to change notification settings - Fork 1.9k
Closed
Labels
bug (Confirmed bugs)
Description
🐛 Bug
When I execute Llama-2-7b-chat-hf-q4f16_1-MLC, the following errors are raised:
(relax-py10) [chenf@b11b0623:/mnt/ssd/chenf/project/mlc_models/example/llama2]$ /mnt/ssd/chenf/opensource/mlc-llm/build/mlc_chat_cli --model-lib-path prebuilt_libs/Llama-2-7b-chat-hf-q4f16_1-cuda.so --model Llama-2-7b-chat-hf-q4f16_1-MLC/
Use MLC config: "/mnt/ssd/chenf/project/mlc_models/example/llama2/Llama-2-7b-chat-hf-q4f16_1-MLC/mlc-chat-config.json"
Use model weights: "/mnt/ssd/chenf/project/mlc_models/example/llama2/Llama-2-7b-chat-hf-q4f16_1-MLC/ndarray-cache.json"
Use model library: "prebuilt_libs/Llama-2-7b-chat-hf-q4f16_1-cuda.so"
You can use the following special commands:
/help print the special commands
/exit quit the cli
/stats print out the latest stats (token/sec)
/reset restart a fresh chat
/reload [model] reload model `model` from disk, or reload the current model if `model` is not specified
Loading model...
Loading finished
Running system prompts...
[11:32:03] /mnt/ssd/chenf/opensource/mlc-llm/3rdparty/tvm/src/runtime/relax_vm/builtin.cc:310: Check failed: (static_cast<int64_t>(ptr->size()) == size) is false: ValueError: ErrorContext(fn=prefill, loc=param[3], param=params, annotation=R.Tuple(R.Tensor((v, 512), dtype="uint32"), R.Tensor((v, 128), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), 
dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 
128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), 
R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), 
dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 
128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), 
R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((12288, 512), dtype="uint32"), R.Tensor((12288, 128), dtype="float16"), R.Tensor((4096, 512), dtype="uint32"), R.Tensor((4096, 128), dtype="float16"), R.Tensor((22016, 512), dtype="uint32"), R.Tensor((22016, 128), dtype="float16"), R.Tensor((4096, 1376), dtype="uint32"), R.Tensor((4096, 344), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((4096,), dtype="float16"), R.Tensor((v, 512), dtype="uint32"), R.Tensor((v, 128), dtype="float16"), R.Tensor((cache_len, 128), dtype="float16"), R.Tensor((cache_len, 128), dtype="float16"))) expect a Tuple with 327 elements, but get a Tuple with 0 elements.
Stack trace:
[bt] (0) /mnt/ssd/chenf/opensource/mlc-llm/build/tvm/libtvm_runtime.so(tvm::runtime::Backtrace[abi:cxx11]()+0x2c) [0x7f58a951c3ac]
[bt] (1) /mnt/ssd/chenf/opensource/mlc-llm/build/mlc_chat_cli(tvm::runtime::detail::LogFatal::Entry::Finalize()+0x3d) [0x56421e368aad]
[bt] (2) /mnt/ssd/chenf/opensource/mlc-llm/build/tvm/libtvm_runtime.so(tvm::runtime::relax_vm::CheckTupleInfo(tvm::runtime::ObjectRef, long, tvm::runtime::Optional<tvm::runtime::String>)+0x29f) [0x7f58a957515f]
[bt] (3) /mnt/ssd/chenf/opensource/mlc-llm/build/tvm/libtvm_runtime.so(tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::runtime::TypedPackedFunc<void (tvm::runtime::ObjectRef, long, tvm::runtime::Optional<tvm::runtime::String>)>::AssignTypedLambda<void (*)(tvm::runtime::ObjectRef, long, tvm::runtime::Optional<tvm::runtime::String>)>(void (*)(tvm::runtime::ObjectRef, long, tvm::runtime::Optional<tvm::runtime::String>), std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >)::{lambda(tvm::runtime::TVMArgs const&, tvm::runtime::TVMRetValue*)#1}> >::Call(tvm::runtime::PackedFuncObj const*, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)+0x216) [0x7f58a9589c96]
[bt] (4) /mnt/ssd/chenf/opensource/mlc-llm/build/tvm/libtvm_runtime.so(tvm::runtime::relax_vm::VirtualMachineImpl::InvokeClosurePacked(tvm::runtime::ObjectRef const&, tvm::runtime::TVMArgs, tvm::runtime::TVMRetValue*)+0x7d) [0x7f58a95d271d]
[bt] (5) /mnt/ssd/chenf/opensource/mlc-llm/build/tvm/libtvm_runtime.so(tvm::runtime::relax_vm::VirtualMachineImpl::RunInstrCall(tvm::runtime::relax_vm::VMFrame*, tvm::runtime::relax_vm::Instruction)+0x90b) [0x7f58a95d471b]
[bt] (6) /mnt/ssd/chenf/opensource/mlc-llm/build/tvm/libtvm_runtime.so(tvm::runtime::relax_vm::VirtualMachineImpl::RunLoop()+0x235) [0x7f58a95d3ba5]
[bt] (7) /mnt/ssd/chenf/opensource/mlc-llm/build/tvm/libtvm_runtime.so(tvm::runtime::relax_vm::VirtualMachineImpl::InvokeBytecode(long, std::vector<tvm::runtime::TVMRetValue, std::allocator<tvm::runtime::TVMRetValue> > const&)+0x2a4) [0x7f58a95d56b4]
[bt] (8) /mnt/ssd/chenf/opensource/mlc-llm/build/tvm/libtvm_runtime.so(+0x218cba) [0x7f58a95d5cba]
To Reproduce
Steps to reproduce the behavior:
- compile mlc runtime from source (use tvm in submodule); mlc-llm commit: 5e23900
- download the model and prebuilt lib from the provided URL;
- execute command:
/mnt/ssd/chenf/opensource/mlc-llm/build/mlc_chat_cli --model-lib-path prebuilt_libs/Llama-2-7b-chat-hf-q4f16_1-cuda.so --model Llama-2-7b-chat-hf-q4f16_1-MLC/
Environment
- Platform (e.g. WebGPU/Vulkan/IOS/Android/CUDA): CUDA
- Operating system (e.g. Ubuntu/Windows/MacOS/...): Ubuntu 20.04
- Device (e.g. iPhone 12 Pro, PC+RTX 3090, ...): Tesla V100
- How you installed MLC-LLM (conda, source): source
- How you installed TVM-Unity (pip, source): source
- Python version (e.g. 3.10): 3.10
- GPU driver version (if applicable):
- CUDA/cuDNN version (if applicable):
- TVM Unity Hash Tag (python -c "import tvm; print('\n'.join(f'{k}: {v}' for k, v in tvm.support.libinfo().items()))", applicable if you compile models):
USE_NVTX: OFF
USE_GTEST: AUTO
SUMMARIZE: OFF
USE_IOS_RPC: OFF
USE_MSC: OFF
USE_ETHOSU: OFF
CUDA_VERSION: 11.6
USE_LIBBACKTRACE: AUTO
DLPACK_PATH: 3rdparty/dlpack/include
USE_TENSORRT_CODEGEN: OFF
USE_THRUST: OFF
USE_TARGET_ONNX: OFF
USE_AOT_EXECUTOR: ON
BUILD_DUMMY_LIBTVM: OFF
USE_CUDNN: OFF
USE_TENSORRT_RUNTIME: OFF
USE_ARM_COMPUTE_LIB_GRAPH_EXECUTOR: OFF
USE_CCACHE: AUTO
USE_ARM_COMPUTE_LIB: OFF
USE_CPP_RTVM: OFF
USE_OPENCL_GTEST: /path/to/opencl/gtest
USE_MKL: OFF
USE_PT_TVMDSOOP: OFF
MLIR_VERSION: NOT-FOUND
USE_CLML: OFF
USE_STACKVM_RUNTIME: OFF
USE_GRAPH_EXECUTOR_CUDA_GRAPH: OFF
ROCM_PATH: /opt/rocm
USE_DNNL: OFF
USE_VITIS_AI: OFF
USE_MLIR: OFF
USE_RCCL: OFF
USE_LLVM: /mnt/ssd/chenf/software/miniconda3/envs/relax-py10/bin/llvm-config
USE_VERILATOR: OFF
USE_TF_TVMDSOOP: OFF
USE_THREADS: ON
USE_MSVC_MT: OFF
BACKTRACE_ON_SEGFAULT: OFF
USE_GRAPH_EXECUTOR: ON
USE_NCCL: OFF
USE_ROCBLAS: OFF
GIT_COMMIT_HASH: 7dfc863df8b6c9227a03547e5a0bf23f44c3f62d
USE_VULKAN: OFF
USE_RUST_EXT: OFF
USE_CUTLASS: OFF
USE_CPP_RPC: OFF
USE_HEXAGON: OFF
USE_CUSTOM_LOGGING: OFF
USE_UMA: OFF
USE_FALLBACK_STL_MAP: OFF
USE_SORT: ON
USE_RTTI: ON
GIT_COMMIT_TIME: 2024-01-04 15:14:07 +0800
USE_HEXAGON_SDK: /path/to/sdk
USE_BLAS: none
USE_ETHOSN: OFF
USE_LIBTORCH: OFF
USE_RANDOM: ON
USE_CUDA: ON
USE_COREML: OFF
USE_AMX: OFF
BUILD_STATIC_RUNTIME: OFF
USE_CMSISNN: OFF
USE_KHRONOS_SPIRV: OFF
USE_CLML_GRAPH_EXECUTOR: OFF
USE_TFLITE: OFF
USE_HEXAGON_GTEST: /path/to/hexagon/gtest
PICOJSON_PATH: 3rdparty/picojson
USE_OPENCL_ENABLE_HOST_PTR: OFF
INSTALL_DEV: OFF
USE_PROFILER: ON
USE_NNPACK: OFF
LLVM_VERSION: 17.0.6
USE_OPENCL: OFF
COMPILER_RT_PATH: 3rdparty/compiler-rt
RANG_PATH: 3rdparty/rang/include
USE_SPIRV_KHR_INTEGER_DOT_PRODUCT: OFF
USE_OPENMP: none
USE_BNNS: OFF
USE_CUBLAS: OFF
USE_METAL: OFF
USE_MICRO_STANDALONE_RUNTIME: OFF
USE_HEXAGON_EXTERNAL_LIBS: OFF
USE_ALTERNATIVE_LINKER: AUTO
USE_BYODT_POSIT: OFF
USE_HEXAGON_RPC: OFF
USE_MICRO: OFF
DMLC_PATH: 3rdparty/dmlc-core/include
INDEX_DEFAULT_I64: ON
USE_RELAY_DEBUG: OFF
USE_RPC: ON
USE_TENSORFLOW_PATH: none
TVM_CLML_VERSION:
USE_MIOPEN: OFF
USE_ROCM: OFF
USE_PAPI: OFF
USE_CURAND: OFF
TVM_CXX_COMPILER_PATH: /usr/bin/c++
HIDE_PRIVATE_SYMBOLS: ON
- Any other relevant information:
Metadata
Metadata
Assignees
Labels
bug (Confirmed bugs)