Skip to content

Commit cfe1711

Browse files
authored
chore: remove repetitive words (#16957)
1 parent b7467aa commit cfe1711

File tree

5 files changed

+7
-7
lines changed

5 files changed

+7
-7
lines changed

gallery/how_to/deploy_models/deploy_prequantized.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,7 @@ def quantize_model(model, inp):
162162
#
163163
# You would see operators specific to quantization such as
164164
# qnn.quantize, qnn.dequantize, qnn.requantize, and qnn.conv2d etc.
165-
input_name = "input" # the input name can be be arbitrary for PyTorch frontend.
165+
input_name = "input" # the input name can be arbitrary for PyTorch frontend.
166166
input_shapes = [(input_name, (1, 3, 224, 224))]
167167
mod, params = relay.frontend.from_pytorch(script_module, input_shapes)
168168
# print(mod) # comment in to see the QNN IR dump

include/tvm/relax/dataflow_pattern.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -914,7 +914,7 @@ class ExternFuncPatternNode : public DFPatternNode {
914914
public:
915915
String global_symbol_; /*!< The global symbol name of the external function */
916916

917-
/*! \brief The the external function name */
917+
/*! \brief The external function name */
918918
const String& global_symbol() const { return global_symbol_; }
919919
void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("global_symbol", &global_symbol_); }
920920

src/runtime/contrib/vllm/attention_kernels.cu

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -145,7 +145,7 @@ __device__ void paged_attention_kernel(
145145

146146
// Load the query to registers.
147147
// Each thread in a thread group has a different part of the query.
148-
// For example, if the the thread group size is 4, then the first thread in the group
148+
// For example, if the thread group size is 4, then the first thread in the group
149149
// has 0, 4, 8, ... th vectors of the query, and the second thread has 1, 5, 9, ...
150150
// th vectors of the query, and so on.
151151
// NOTE(woosuk): Because q is split from a qkv tensor, it may not be contiguous.
@@ -185,7 +185,7 @@ __device__ void paged_attention_kernel(
185185

186186
// Load a key to registers.
187187
// Each thread in a thread group has a different part of the key.
188-
// For example, if the the thread group size is 4, then the first thread in the group
188+
// For example, if the thread group size is 4, then the first thread in the group
189189
// has 0, 4, 8, ... th vectors of the key, and the second thread has 1, 5, 9, ... th
190190
// vectors of the key, and so on.
191191
for (int i = 0; i < NUM_TOKENS_PER_THREAD_GROUP; i++) {

src/runtime/relax_vm/kv_state.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ class KVStateObj : public Object {
8383
* with prefill length "10", "15", "20", then we pass `[5, 1, 8]`
8484
* as the seq_ids and `[10, 15, 20]` as the append_lengths.
8585
* This method is invoked right before entering the model forward
86-
* function, and contains operations to prepare the the incoming
86+
* function, and contains operations to prepare the incoming
8787
* forward. For instance, this method may send auxiliary KV cache
8888
* data structures to GPUs so that they can be operated
8989
* in the model forward function.

src/runtime/relax_vm/paged_kv_cache.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ struct Block {
8585
int32_t start_pos = 0;
8686
/*!
8787
* \brief The current attention sink length of the block.
88-
* It means the the **first** sink size elements will be pinned
88+
* It means the **first** sink size elements will be pinned
8989
* in the KV cache even when sliding window is enabled.
9090
*/
9191
int32_t sink_length = 0;
@@ -247,7 +247,7 @@ class PagedKVCacheAuxDataManager {
247247
/*!
248248
* \brief Copy the append length indptr array on device.
249249
* \note Since the Q/K/V data may have raggedness in terms of lengths,
250-
* we represent the the append lengths in CSR format.
250+
* we represent the append lengths in CSR format.
251251
*/
252252
virtual NDArray CopyCurAppendLengthIndptrAsync(std::vector<int32_t>* data) = 0;
253253
/*! \brief Copy the k position offset of applying RoPE for each sequence. */

0 commit comments

Comments (0)