2 changes: 1 addition & 1 deletion docs/Doxyfile
@@ -306,7 +306,7 @@ EXTENSION_MAPPING =

# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by by putting a % sign in front of the word
+# be prevented in individual cases by putting a % sign in front of the word
# or globally by setting AUTOLINK_SUPPORT to NO.
# The default value is: YES.

4 changes: 2 additions & 2 deletions docs/conf.py
@@ -244,15 +244,15 @@ def install_request_hook(gallery_conf, fname):
# Installs the latest dev build of TVM from PyPI, with CUDA enabled. To use this,
# you must request a Google Colab instance with a GPU by going to Runtime ->
# Change runtime type -> Hardware accelerator -> GPU. If you wish to build from
-# source, see see https://tvm.apache.org/docs/install/from_source.html
+# source, see https://tvm.apache.org/docs/install/from_source.html
pip install tlcpack-nightly-cu113 --pre -f https://tlcpack.ai/wheels"""

INSTALL_TVM_CUDA_FIXED = f"""\
%%shell
# Installs TVM version {version} from PyPI, with CUDA enabled. To use this,
# you must request a Google Colab instance with a GPU by going to Runtime ->
# Change runtime type -> Hardware accelerator -> GPU. If you wish to build from
-# source, see see https://tvm.apache.org/docs/install/from_source.html
+# source, see https://tvm.apache.org/docs/install/from_source.html
pip install apache-tvm-cu113=={version} -f https://tlcpack.ai/wheels"""


2 changes: 1 addition & 1 deletion include/tvm/runtime/logging.h
@@ -113,7 +113,7 @@
* in a function, or 'continue' or 'break' in a loop)
* The default behavior when quit_on_assertion is false, is to 'return false'. If this is not
* desirable, the macro caller can pass one more last parameter to COND_X to tell COND_X what
-* to do when when quit_on_assertion is false and the assertion fails.
+* to do when quit_on_assertion is false and the assertion fails.
*
* Rationale: These macros were designed to implement functions that have two behaviors
* in a concise way. Those behaviors are quitting on assertion failures, or trying to
2 changes: 1 addition & 1 deletion include/tvm/runtime/ndarray.h
@@ -275,7 +275,7 @@ class NDArray::ContainerBase {
protected:
/*!
* \brief The shape container,
-* can be used used for shape data.
+* can be used for shape data.
*/
ShapeTuple shape_;
};
2 changes: 1 addition & 1 deletion include/tvm/runtime/packed_func.h
@@ -133,7 +133,7 @@ class PackedFuncSubObj : public PackedFuncObj {
* The arguments are passed by packed format.
*
* This is an useful unified interface to call generated functions,
-* It is the unified function function type of TVM.
+* It is the unified function type of TVM.
* It corresponds to TVMFunctionHandle in C runtime API.
*/
class PackedFunc : public ObjectRef {
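The comment above describes PackedFunc as TVM's unified function type. As a minimal sketch of that calling convention from the Python frontend (not part of this diff; the registered name `demo.add_one` is illustrative only):

```python
import tvm

# Register a Python function under a global name; it becomes a PackedFunc
# reachable from any TVM frontend. "demo.add_one" is a made-up name.
@tvm.register_func("demo.add_one")
def add_one(x):
    return x + 1

# Retrieve and call it back through the same unified packed interface.
f = tvm.get_global_func("demo.add_one")
assert f(41) == 42
```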
2 changes: 1 addition & 1 deletion include/tvm/tir/expr.h
@@ -1030,7 +1030,7 @@ class CommReducer : public ObjectRef {
TVM_DEFINE_OBJECT_REF_METHODS(CommReducer, ObjectRef, CommReducerNode);
};

-/*! \brief Reduction operator operator */
+/*! \brief Reduction operator */
class ReduceNode : public PrimExprNode {
public:
/*! \brief The commutative combiner */
2 changes: 1 addition & 1 deletion include/tvm/tir/stmt.h
@@ -778,7 +778,7 @@ class SeqStmt : public Stmt {
}

// If the argument is a single SeqStmt argument with no
-// flattening or unwrapping required required, then we may
+// flattening or unwrapping required, then we may
// return the SeqStmt as-is.
if constexpr (sizeof...(seq_args) == 1) {
if (auto opt = Flattener::AsSeqStmt(std::forward<Args>(seq_args)...)) {
2 changes: 1 addition & 1 deletion python/tvm/relay/op/contrib/clml.py
@@ -35,7 +35,7 @@


def clml_sdk_version():
"""Utility function to get clml version version"""
"""Utility function to get clml version"""

return int(tvm.support.libinfo().get("TVM_CLML_VERSION", 2))

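For context, `clml_sdk_version` can be called directly; a minimal usage sketch based on the implementation shown above:

```python
from tvm.relay.op.contrib import clml

# Returns the CLML SDK version recorded in this TVM build, falling back
# to 2 when TVM_CLML_VERSION is not present in libinfo (see diff above).
print(clml.clml_sdk_version())
```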
2 changes: 1 addition & 1 deletion python/tvm/relay/transform/memory_plan.py
@@ -287,7 +287,7 @@ def process_alloc_storage(self, dynamic_regions, lhs, call):
dynamic_regions.append(lhs)
else:
# A new scope is created when entering a new region with different
-# device device.
+# device.
region = self.current_region(dtype)
if region.device and region.device.device_type != dev.device_type:
self.enter_scope()
2 changes: 1 addition & 1 deletion python/tvm/runtime/ndarray.py
@@ -617,7 +617,7 @@ def array(arr, device=cpu(0), mem_scope=None):
The array to be copied from

device : Device, optional
-The device device to create the array
+The device to create the array

mem_scope : Optional[str]
The memory scope of the array
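The `array` helper documented above is exposed as `tvm.nd.array`; a minimal usage sketch matching the signature in the diff:

```python
import numpy as np
import tvm

# Copy a numpy array into a TVM NDArray on CPU device 0.
x = tvm.nd.array(np.arange(4, dtype="float32"), device=tvm.cpu(0))
print(x.numpy())  # [0. 1. 2. 3.]
```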
2 changes: 1 addition & 1 deletion python/tvm/te/hybrid/__init__.py
@@ -39,7 +39,7 @@


def script(pyfunc):
"""Decorate a python function function as hybrid script.
"""Decorate a python function as hybrid script.

The hybrid function support emulation mode and parsing to
the internal language IR.
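The docstring above mentions that hybrid script supports both emulation and parsing to the internal IR. A minimal sketch of a decorated function, modeled on the TVM hybrid-script documentation (the `outer_product` body is illustrative):

```python
from tvm.te.hybrid import script

@script
def outer_product(a, b):
    # output_tensor is a hybrid-script intrinsic; in emulation mode the
    # decorated function can also run directly on numpy arrays.
    c = output_tensor((a.shape[0], b.shape[0]), "float32")
    for i in range(a.shape[0]):
        for j in range(b.shape[0]):
            c[i, j] = a[i] * b[j]
    return c
```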
2 changes: 1 addition & 1 deletion python/tvm/te/schedule.py
@@ -648,7 +648,7 @@ def __exit__(self, ptype, value, trace):


# Sentinel value used to indicate which groups of pre-flattening axes
-# should be used to post-flattening axes axes. Moved from
+# should be used to post-flattening axes. Moved from
# te.AXIS_SEPARATOR to tir.IndexMap.AXIS_SEPARATOR for general use,
# maintained here for backwards compatibility.
AXIS_SEPARATOR = IndexMap.AXIS_SEPARATOR
2 changes: 1 addition & 1 deletion python/tvm/topi/arm_cpu/qnn.py
@@ -309,7 +309,7 @@ def qnn_conv2d(attrs, inputs, out_type):
# the output width, but autotuning this value would improve performance a lot.
num_outputs = _pick_num_outputs(out_width)

-# Next, decide whether whether we need "parity alternation". For example, if we have an
+# Next, decide whether we need "parity alternation". For example, if we have an
# 8x3x3x3 kernel (8 output channels, height 3, width 3, input channels 3) in the OHWI layout,
# then every output channel kernel slice will be 27 halfwords. This means every other output
# channel will not be word aligned, which will cause slowness/crashes!
4 changes: 2 additions & 2 deletions src/arith/const_fold.h
@@ -441,7 +441,7 @@ struct SymbolicLimits {
/*!
* \brief Opaque expression representing positive infinity.
*
-* It can can only be used as parameter of by min/max
+* It can only be used as parameter of by min/max
* for integer analysis and cannot be used in normal expressions.
*
* \return positive infinity.
@@ -459,7 +459,7 @@ inline bool is_pos_inf(const PrimExpr& value) { return value.same_as(SymbolicLim
/*!
* \brief Opaque expression representing negative infinity.
*
-* It can can only be used as parameter of by min/max
+* It can only be used as parameter of by min/max
* for integer analysis and cannot be used in normal expressions.
*
* \return negative infinity.
4 changes: 2 additions & 2 deletions src/arith/product_normal_form.h
@@ -66,11 +66,11 @@ inline void UnpackSum(const PrimExpr& value, FLeaf fleaf, int sign = 1) {
}

/*!
-* \brief Helper function to multiply extent and and re-normalize.
+* \brief Helper function to multiply extent and re-normalize.
*
* Multiply extent scale and re-normalize to form (x * y) * z
*
-* NOTE on multiplication order: when have have shape (s[0], s[1], s[2]),
+* NOTE on multiplication order: when have shape (s[0], s[1], s[2]),
* we prefer to multiple in order of s[0] * s[1] * s[2]

* \param lhs The lhs iterator
2 changes: 1 addition & 1 deletion src/relay/collage/partition_rule.h
@@ -375,7 +375,7 @@ class OpCallByKindPartitionRule : public PartitionRule {
*
* Kinds are ordered as above from least- to most-constraining w.r.t. possible partition
* opportunities. When we write a kind abbreviation below we intend it to mean that kind *or less*.
-* And when when write 'kl -> kr' we mean it to match a sub-expression of kind kr or less who's
+* And when write 'kl -> kr' we mean it to match a sub-expression of kind kr or less who's
* dataflow inputs are all of kind kl or less.
*
* We can then mimic the classic \p FuseOps TVM Pass with the following more primitive combiner
2 changes: 1 addition & 1 deletion src/relay/transforms/combine_parallel_op_batch.h
@@ -133,7 +133,7 @@ class ParallelOpBatchCombiner : public ParallelOpCombiner {

private:
/* \brief name of op to replace combined ops with. for example,
-* for combining parallel dense, this will will be set to
+* for combining parallel dense, this will be set to
* nn.batch_matmul
*/
std::string batch_op_name_;
2 changes: 1 addition & 1 deletion src/runtime/c_runtime_api.cc
@@ -215,7 +215,7 @@ void DeviceAPI::SyncStreamFromTo(Device dev, TVMStreamHandle event_src, TVMStrea
/*!
* \brief Normalize error message
*
-* Parse them header generated by by LOG(FATAL) and ICHECK
+* Parse them header generated by LOG(FATAL) and ICHECK
* and reformat the message into the standard format.
*
* This function will also merge all the stack traces into
2 changes: 1 addition & 1 deletion src/runtime/crt/aot_executor/aot_executor.c
@@ -228,7 +228,7 @@ int TVMAotExecutor_Release(TVMAotExecutor* executor, const DLDevice device) {
int status;

if (executor->num_args > 0) {
-// free TVMNDArray data memory for each each argument
+// free TVMNDArray data memory for each argument
int i;
for (i = 0; i < executor->num_args; ++i) {
status = TVMNDArray_Release(&executor->args[i]);
2 changes: 1 addition & 1 deletion src/target/source/ptx.h
@@ -146,7 +146,7 @@ std::string PrintArriveBarrierAsm(const std::string& barrier);
/*!
* \brief Print ptx barrier arrival with expect tx operation using mbarrier.arrive.expect_tx
* \param barrier: The name of the barrier in shared memory.
-* \param byte_count: Increases the the tx count of the mbarrier object to track completion of
+* \param byte_count: Increases the tx count of the mbarrier object to track completion of
* addtional async transactions.
*/
std::string PrintArriveBarrierExpectTxAsm(const std::string& barrier,
2 changes: 1 addition & 1 deletion src/tir/analysis/control_flow_graph.h
@@ -129,7 +129,7 @@ struct BufferTouch {
* accessed by this touch during this loop iteration or a
* subsequent loop iteration.
*
-* Used during backward propagation, to track indices that that are
+* Used during backward propagation, to track indices that are
* overwritten in the current loop iteration or in a later loop
* iteration.
*/
2 changes: 1 addition & 1 deletion src/tir/schedule/error.h
@@ -69,7 +69,7 @@ class LoopPositionError : public ScheduleError {
String DetailRenderTemplate() const final {
std::ostringstream os;
os << "ScheduleError: The input loop {0} of " << primitive_
<< " is required to be be an ancestor of block {1}.";
<< " is required to be an ancestor of block {1}.";
return os.str();
}

2 changes: 1 addition & 1 deletion src/tir/transforms/unroll_loop.cc
@@ -247,7 +247,7 @@ class LoopUnroller : public StmtExprMutator {
int auto_max_step_;
int auto_max_depth_;
// max extent of loop to auto unroll
-// this not not count the total steps, only count the number of loops
+// this does not count the total steps, only count the number of loops
int auto_max_extent_;
bool explicit_unroll_;
// Wether to unroll loops to local access.
@@ -124,7 +124,7 @@ def load(cls, file_name):


def get_low_high_atol_rtol(dtype):
"""Returns a tuple with boundary values and and tolerance for ACL tests."""
"""Returns a tuple with boundary values and tolerance for ACL tests."""

if dtype == "float32":
low, high, atol, rtol = (-127, 128, 0.001, 0.001)
@@ -773,7 +773,7 @@ The input cache grows to hold the vertically adjacent slice:

*Filter Cache*

-The filter cache grows to hold the 3x3 filter filter:
+The filter cache grows to hold the 3x3 filter:

```
allocate(packed_filter.global: Pointer(global float32), float32, [73728]), storage_scope = global;
@@ -345,7 +345,7 @@ def _get_elemwise_add_reference_value_tensors(shape: list, dtype: str):
next_value += 1

elif np_dtype.kind == "f":
-# NOTE: For simplicity, we avoid test data that that require
+# NOTE: For simplicity, we avoid test data that require
# well-defined behavior on floating-point overflow.
# But it may be reasonable to test that in the future.
min_value = np.finfo(np_dtype).min
2 changes: 1 addition & 1 deletion tests/python/relay/test_pass_plan_devices.py
@@ -1612,7 +1612,7 @@ def @main(%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %
%1 = @on_scope_b(on_device(%b, virtual_device=meta[VirtualDevice][0], constrain_body=False));
// %c's memory scope is "scopeB", so no copy required.
%2 = @on_scope_b(on_device(%c, virtual_device=meta[VirtualDevice][0], constrain_body=False));
-// result's memory scope is is on "scopeA", so will require a "scopeB"->"scopeA" copy.
+// result's memory scope is on "scopeA", so will require a "scopeB"->"scopeA" copy.
%3 = add(add(%0, %1), %2);
on_device(%3, virtual_device=meta[VirtualDevice][0], constrain_body=False)
}
2 changes: 1 addition & 1 deletion tests/python/unittest/test_tir_transform_remove_no_op.py
@@ -414,7 +414,7 @@ class TestRemoveSeparatedOverwriteOfPredicatedLoop(BaseBeforeAfter):
"""Remove repeated writes to the same predicated region.

Similar to TestRemoveSeparatedOverwrites, but the independent loop
-between the first and second writes writes to a different subset
+between the first and second writes to a different subset
of the same buffer.
"""

2 changes: 1 addition & 1 deletion web/src/environment.ts
@@ -109,7 +109,7 @@ export class Environment implements LibraryProvider {
}

private environment(initEnv: Record<string, any>): Record<string, any> {
-// default env can be be overriden by libraries.
+// default env can be overriden by libraries.
const defaultEnv = {
"__cxa_thread_atexit": (): void => {},
// eslint-disable-next-line @typescript-eslint/no-unused-vars