
Commit 98071c7

Merge branch 'main' into feature/2022-06-16/use-curand

2 parents e3445bc + 4b15746

61 files changed: +2939 -325 lines


apps/microtvm/arduino/template_project/microtvm_api_server.py
12 additions, 5 deletions

@@ -214,14 +214,21 @@ def _template_model_header(self, source_dir, metadata):
         with open(source_dir / "model.h", "r") as f:
             model_h_template = Template(f.read())
 
-        assert (
-            metadata["style"] == "full-model"
+        all_module_names = []
+        for name in metadata["modules"].keys():
+            all_module_names.append(name)
+
+        assert all(
+            metadata["modules"][mod_name]["style"] == "full-model" for mod_name in all_module_names
         ), "when generating AOT, expect only full-model Model Library Format"
 
-        template_values = {
-            "workspace_size_bytes": metadata["memory"]["functions"]["main"][0][
+        workspace_size_bytes = 0
+        for mod_name in all_module_names:
+            workspace_size_bytes += metadata["modules"][mod_name]["memory"]["functions"]["main"][0][
                 "workspace_size_bytes"
-            ],
+            ]
+        template_values = {
+            "workspace_size_bytes": workspace_size_bytes,
         }
 
         with open(source_dir / "model.h", "w") as f:
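
The rewritten _template_model_header sums the main-function workspace size across every module in the Model Library Format metadata instead of reading a single top-level entry. A minimal sketch of that aggregation, run against a hypothetical two-module metadata dict (the key layout mirrors the diff above; the module names and sizes are made up):

# Hypothetical MLF metadata with two modules; key layout mirrors the diff above.
metadata = {
    "modules": {
        "mod0": {
            "style": "full-model",
            "memory": {"functions": {"main": [{"workspace_size_bytes": 1024}]}},
        },
        "mod1": {
            "style": "full-model",
            "memory": {"functions": {"main": [{"workspace_size_bytes": 512}]}},
        },
    }
}

all_module_names = list(metadata["modules"].keys())
assert all(
    metadata["modules"][mod_name]["style"] == "full-model" for mod_name in all_module_names
), "when generating AOT, expect only full-model Model Library Format"

# Sum the per-module workspace requirements, as the new code does.
workspace_size_bytes = 0
for mod_name in all_module_names:
    workspace_size_bytes += metadata["modules"][mod_name]["memory"]["functions"]["main"][0][
        "workspace_size_bytes"
    ]
print(workspace_size_bytes)  # 1536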

docker/Dockerfile.ci_gpu
4 additions, 0 deletions

@@ -139,6 +139,10 @@ COPY install/ubuntu_install_sccache.sh /install/ubuntu_install_sccache.sh
 RUN bash /install/ubuntu_install_sccache.sh
 ENV PATH /opt/sccache:$PATH
 
+# dnnl
+COPY install/ubuntu_install_dnnl.sh /install/ubuntu_install_dnnl.sh
+RUN bash /install/ubuntu_install_dnnl.sh
+
 # Environment variables
 ENV PATH=/usr/local/nvidia/bin:${PATH}
 ENV PATH=/usr/local/cuda/bin:${PATH}

docker/clear-stale-images.sh
113 additions, 0 deletions (new file)

@@ -0,0 +1,113 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Remove tvm-related docker images from the local system which
+# are not used by the currently-checked-out branch in this git
+# repository plus any linked worktrees.
+
+set -euo pipefail
+
+dry_run=0
+repositories=( "$(cd $(dirname "$0") && git rev-parse --show-toplevel)" )
+skip_confirm=0
+verbose=0
+while [ "${1+x}" == "x" ]; do
+    case "$1" in
+        --help|-h)
+            echo "usage: $0 [-n] [-v] [-y] <repository> [<repository> ...]"
+            echo ""
+            echo "Remove tvm-related docker images from the local system which"
+            echo "are not used by the currently-checked-out branch in this git"
+            echo "repository plus any linked worktrees."
+            echo ""
+            echo 'This command should remove only docker images beginning with "tlcpack"'
+            echo ""
+            echo "Options:"
+            echo " -n            Perform a dry-run and just print the docker rmi command"
+            echo " -v            Verbosely list the images kept and why"
+            echo " -y            Skip confirmation"
+            echo " <repository>  Additional git repositories to consult."
+            exit 2
+            ;;
+        -n)
+            dry_run=1
+            ;;
+        -v)
+            verbose=1
+            ;;
+        -y)
+            skip_confirm=1
+            ;;
+        *)
+            repositories=( "${repositories[@]}" "$1" )
+            ;;
+    esac
+    shift
+done
+
+declare -a used_images
+for r in "${repositories[@]}"; do
+    if [ -d "${r}/.git" ]; then
+        worktree="${r}"
+    else
+        worktree="$(cat "${r}/.git")"
+    fi
+    while read wt; do
+        d="${wt:9:${#wt}}"  # strip "worktree " prefix
+        for img in $(cat "${d}/Jenkinsfile" | grep -E '^ci_[a-z]+ = ' | sed -E "s/ci_[a-z]+ = '([^\"]*)'/\1/"); do
+            used_images=( "${used_images[@]}" "${img}" )
+        done
+    done < <(cd "${worktree}" && git worktree list --porcelain | grep '^worktree ')
+done
+
+declare -a to_rm
+while read image; do
+    if [ "${image}" == "<none>:<none>" ]; then
+        continue
+    fi
+    grep -qE "^tlcpack" < <(echo "$image") && is_tlcpack=1 || is_tlcpack=0
+    if [ $is_tlcpack -eq 0 ]; then  # non-tlcpack image
+        if [ $verbose -ne 0 ]; then
+            echo "skipping (non-tvm): $image"
+        fi
+        continue
+    fi
+    grep -q "$image" < <(echo "${used_images[@]}") && is_used=1 || is_used=0
+    if [ $is_used -eq 1 ]; then  # Image was found in used_images
+        if [ $verbose -ne 0 ]; then
+            echo "skipping (image used): $image"
+        fi
+        continue
+    fi
+    to_rm=( "${to_rm[@]}" "${image}" )
+done < <(docker images --format '{{.Repository}}:{{.Tag}}')
+
+docker_cmd=( docker rmi "${to_rm[@]}" )
+if [ ${dry_run} -ne 0 ]; then
+    echo "would run: ${docker_cmd[@]}"
+else
+    if [ $skip_confirm -eq 0 ]; then
+        echo "will run: ${docker_cmd[@]}"
+        read -p "Proceed? [y/N] " proceed
+        if [ "${proceed-}" != "y" -a "${proceed-}" != "Y" ]; then
+            echo "Aborted."
+            exit 2
+        fi
+    fi
+    "${docker_cmd[@]}"
fi
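
The script's first pass collects every docker image still referenced by a checkout: it lists worktrees with git worktree list --porcelain, then pulls the image names assigned to ci_* variables out of each worktree's Jenkinsfile via the grep/sed pipeline above. A Python sketch of that extraction step (the Jenkinsfile contents and image tags below are made up for illustration):

import re

# Illustrative stand-in for the grep | sed pipeline: capture the value
# assigned to each ci_* variable in a Jenkinsfile.
jenkinsfile = """\
ci_lint = 'tlcpack/ci-lint:v0.71'
ci_gpu = 'tlcpack/ci-gpu:v0.87'
unrelated = 'not-a-ci-image'
"""

used_images = re.findall(r"^ci_[a-z]+ = '([^']*)'$", jenkinsfile, flags=re.MULTILINE)
print(used_images)  # ['tlcpack/ci-lint:v0.71', 'tlcpack/ci-gpu:v0.87']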

docs/contribute/pull_request.rst
8 additions, 0 deletions

@@ -113,6 +113,14 @@ each time (e.g. you can test a change in CPU and i386 while retaining incrementa
     # run the CPU build and drop into a shell in the container
     python tests/scripts/ci.py cpu --interactive
 
+We regularly update our docker images and, over time, stale images may unnecessarily consume disk
+space. You can remove stale images that aren't used in the presently checked-out branch plus any
+other worktrees using the following command:
+
+.. code:: bash
+    docker/clear-stale-images.sh
+
+Consult the ``--help`` for more options.
 
 C++ (local)
 ^^^^^^^^^^^

include/tvm/meta_schedule/database.h
27 additions, 0 deletions

@@ -98,6 +98,9 @@ struct WorkloadEqual {
   }
 };
 
+/*! \brief The class of measure candidates. */
+class MeasureCandidate;
+
 /*! \brief The class of tuning records. */
 class TuningRecordNode : public runtime::Object {
  public:
@@ -123,6 +126,9 @@ class TuningRecordNode : public runtime::Object {
   static constexpr const char* _type_key = "meta_schedule.TuningRecord";
   TVM_DECLARE_FINAL_OBJECT_INFO(TuningRecordNode, runtime::Object);
 
+  /*! \brief Construct the measure candidate given the initial IR module and trace
+   * stored in the tuning record. */
+  MeasureCandidate AsMeasureCandidate() const;
   /*!
    * \brief Export the tuning record to a JSON string.
    * \return An array containing the trace, running secs, serialized target, and
@@ -187,6 +193,11 @@ class DatabaseNode : public runtime::Object {
    * \return An array of top K tuning records for the given workload.
    */
   virtual Array<TuningRecord> GetTopK(const Workload& workload, int top_k) = 0;
+  /*!
+   * \brief Get all tuning records from the database.
+   * \return An Array of all the tuning records in the database.
+   */
+  virtual Array<TuningRecord> GetAllTuningRecords() = 0;
   /*!
    * \brief Get the size of the database.
    * \return The size of the database.
@@ -224,6 +235,11 @@ class PyDatabaseNode : public DatabaseNode {
    * \return An array of top K tuning records for the given workload.
    */
   using FGetTopK = runtime::TypedPackedFunc<Array<TuningRecord>(const Workload&, int)>;
+  /*!
+   * \brief The function type of `GetAllTuningRecords` method.
+   * \return An Array of all the tuning records in the database.
+   */
+  using FGetAllTuningRecords = runtime::TypedPackedFunc<Array<TuningRecord>()>;
   /*!
    * \brief The function type of `Size` method.
    * \return The size of the database.
@@ -238,6 +254,8 @@ class PyDatabaseNode : public DatabaseNode {
   FCommitTuningRecord f_commit_tuning_record;
   /*! \brief The packed function to the `GetTopK` function. */
   FGetTopK f_get_top_k;
+  /*! \brief The packed function to the `GetAllTuningRecords` function. */
+  FGetAllTuningRecords f_get_all_tuning_records;
   /*! \brief The packed function to the `Size` function. */
   FSize f_size;
 
@@ -249,6 +267,7 @@ class PyDatabaseNode : public DatabaseNode {
     // `f_commit_workload` is not visited
     // `f_commit_tuning_record` is not visited
     // `f_get_top_k` is not visited
+    // `f_get_all_tuning_records` is not visited
     // `f_size` is not visited
   }
 
@@ -273,6 +292,12 @@ class PyDatabaseNode : public DatabaseNode {
     return f_get_top_k(workload, top_k);
   }
 
+  Array<TuningRecord> GetAllTuningRecords() final {
+    ICHECK(f_get_all_tuning_records != nullptr)
+        << "PyDatabase's GetAllTuningRecords method not implemented!";
+    return f_get_all_tuning_records();
+  }
+
   int64_t Size() final {
     ICHECK(f_size != nullptr) << "PyDatabase's Size method not implemented!";
     return f_size();
@@ -302,13 +327,15 @@ class Database : public runtime::ObjectRef {
   * \param f_commit_workload The packed function of `CommitWorkload`.
   * \param f_commit_tuning_record The packed function of `CommitTuningRecord`.
   * \param f_get_top_k The packed function of `GetTopK`.
+   * \param f_get_all_tuning_records The packed function of `GetAllTuningRecords`.
   * \param f_size The packed function of `Size`.
   * \return The created database.
   */
  TVM_DLL static Database PyDatabase(PyDatabaseNode::FHasWorkload f_has_workload,
                                     PyDatabaseNode::FCommitWorkload f_commit_workload,
                                     PyDatabaseNode::FCommitTuningRecord f_commit_tuning_record,
                                     PyDatabaseNode::FGetTopK f_get_top_k,
+                                    PyDatabaseNode::FGetAllTuningRecords f_get_all_tuning_records,
                                     PyDatabaseNode::FSize f_size);
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(Database, runtime::ObjectRef, DatabaseNode);
};
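
PyDatabaseNode backs each database method with a nullable packed-function field, and the new f_get_all_tuning_records follows the established pattern: ICHECK that the hook was provided, then forward the call. A pure-Python analog of that dispatch, showing why an unimplemented hook fails loudly rather than silently (class and function names here are illustrative, not TVM API):

from typing import Callable, List, Optional

class PyDatabaseShim:
    """Toy stand-in for PyDatabaseNode: one nullable hook per method."""

    def __init__(self, f_get_all_tuning_records: Optional[Callable[[], List[str]]] = None):
        self.f_get_all_tuning_records = f_get_all_tuning_records

    def get_all_tuning_records(self) -> List[str]:
        # Plays the role of the ICHECK in the C++ code above.
        assert (
            self.f_get_all_tuning_records is not None
        ), "PyDatabase's GetAllTuningRecords method not implemented!"
        return self.f_get_all_tuning_records()

db = PyDatabaseShim(f_get_all_tuning_records=lambda: ["record-a", "record-b"])
print(db.get_all_tuning_records())  # ['record-a', 'record-b']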

python/tvm/auto_scheduler/testing/tune_onnx.py
2 additions, 0 deletions

@@ -28,6 +28,7 @@
 from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
 from tvm.meta_schedule.utils import cpu_count
 from tvm.relay.frontend import from_onnx
+from tvm.support import describe
 
 
 def _parse_args():
@@ -148,6 +149,7 @@ def main():
     else:
         raise NotImplementedError(f"Unsupported target {ARGS.target}")
 
+    describe()
     print(f"Workload: {ARGS.model_name}")
     onnx_model = onnx.load(ARGS.onnx_path)
     shape_dict = {}

python/tvm/auto_scheduler/testing/tune_relay.py
3 additions, 1 deletion

@@ -146,14 +146,16 @@ def main():
         )
     else:
         raise NotImplementedError(f"Unsupported target {ARGS.target}")
+
+    describe()
+    print(f"Workload: {ARGS.workload}")
     mod, params, (input_name, input_shape, input_dtype) = get_network(
         ARGS.workload,
         ARGS.input_shape,
         cache_dir=ARGS.cache_dir,
     )
     input_info = {input_name: input_shape}
     input_data = {}
-    print(f"Workload: {ARGS.workload}")
     for input_name, input_shape in input_info.items():
         print(f"  input_name: {input_name}")
         print(f"  input_shape: {input_shape}")

python/tvm/auto_scheduler/testing/tune_te.py
2 additions, 0 deletions

@@ -91,6 +91,8 @@ def _parse_args():
 
 
 def main():
+    describe()
+    print(f"Workload: {ARGS.workload}")
     log_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}.json")
     workload_func, params = CONFIGS[ARGS.workload]
     params = params[0]  # type: ignore
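
The three auto_scheduler testing scripts above gain the same preamble: call describe() from tvm.support before printing the workload banner, so each tuning log opens with a description of the host and TVM build environment. A minimal sketch of the shared pattern (the workload name here is a placeholder; the real scripts take it from ARGS):

from tvm.support import describe

describe()                  # environment report at the top of the log
print("Workload: ...")      # then the per-script workload banner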

python/tvm/contrib/pipeline_executor.py
11 additions, 0 deletions

@@ -55,6 +55,7 @@ def __init__(self, module):
         self._get_input = self.module["get_input"]
         self._get_output = self.module["get_output"]
         self._get_num_outputs = self.module["get_num_outputs"]
+        self._get_num_inputs = self.module["get_num_inputs"]
         self._get_input_pipeline_map = self.module["get_input_pipeline_map"]
         self._get_pipe_execute_count = self.module["get_execute_count"]
 
@@ -159,6 +160,16 @@ def num_outputs(self):
         """
         return self._get_num_outputs()
 
+    @property
+    def num_inputs(self):
+        """Get the number of inputs
+        Returns
+        -------
+        count : int
+            The number of inputs
+        """
+        return self._get_num_inputs()
+
     @staticmethod
     def load_library(config_file_name):
         """Import files to create a pipeline executor.

python/tvm/driver/tvmc/model.py
14 additions, 3 deletions

@@ -391,9 +391,20 @@ def import_package(self, package_path: str):
             with open(temp.relpath("metadata.json")) as metadata_json:
                 metadata = json.load(metadata_json)
 
-            has_graph_executor = "graph" in metadata["executors"]
-            graph = temp.relpath("executor-config/graph/graph.json") if has_graph_executor else None
-            params = temp.relpath(f'parameters/{metadata["model_name"]}.params')
+            all_module_names = []
+            for name in metadata["modules"].keys():
+                all_module_names.append(name)
+            assert len(all_module_names) == 1, "Multiple modules in MLF is not supported."
+
+            module_name = all_module_names[0]
+            module_metdata = metadata["modules"][module_name]
+            has_graph_executor = "graph" in module_metdata["executors"]
+            graph = (
+                temp.relpath(f"executor-config/graph/{module_name}.graph")
+                if has_graph_executor
+                else None
+            )
+            params = temp.relpath(f"parameters/{module_name}.params")
 
             self.type = "mlf"
         else:
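
import_package now reads per-module metadata from the MLF's "modules" table instead of top-level keys, and derives the graph and params paths from the module name; exactly one module is accepted for now. A sketch of the path derivation against a hypothetical single-module metadata dict (the module name "default" is made up; only the keys the new code touches are shown):

# Hypothetical metadata.json contents.
metadata = {"modules": {"default": {"executors": ["graph"]}}}

all_module_names = list(metadata["modules"].keys())
assert len(all_module_names) == 1, "Multiple modules in MLF is not supported."
module_name = all_module_names[0]

has_graph_executor = "graph" in metadata["modules"][module_name]["executors"]
graph = f"executor-config/graph/{module_name}.graph" if has_graph_executor else None
params = f"parameters/{module_name}.params"
print(graph)   # executor-config/graph/default.graph
print(params)  # parameters/default.params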
