@@ -1523,7 +1523,7 @@ struct PirToPyCodeConverterHelper {
  std::string ConvertBlockSignatureAsArgs(const pir::Operation* op) {
    if (op->num_regions() == 0) return "";
    std::stringstream ss;
-    const auto& ConvertPostionalArgsAsQuotedString = [&](const auto& block) {
+    const auto& ConvertPositionalArgsAsQuotedString = [&](const auto& block) {
      std::stringstream ss;
      int idx = 0;
      for (const auto& value : block.args()) {
@@ -1547,7 +1547,7 @@ struct PirToPyCodeConverterHelper {
        if (j++ > 0) {
          ss << ",";
        }
-        ss << "[" << ConvertPostionalArgsAsQuotedString(block) << "]";
+        ss << "[" << ConvertPositionalArgsAsQuotedString(block) << "]";
      }
      ss << "]";
    }

2 changes: 1 addition & 1 deletion paddle/fluid/distributed/collective/deep_ep/deep_ep.cpp
@@ -143,7 +143,7 @@ Buffer::Buffer(int rank,

  // Create 32 MiB workspace
  // Note(ZKK): here we allocate more(2 * M2N_NUM_WORKSPACE) to support M2N!
-  // Later we will opitimize here!
+  // Later we will optimize here!
  CUDA_CHECK(
      cudaMalloc(&workspace, 2 * M2N_NUM_WORKSPACE * NUM_WORKSPACE_BYTES));
  CUDA_CHECK(cudaMemsetAsync(

2 changes: 1 addition & 1 deletion paddle/phi/infermeta/ternary.cc
@@ -1658,7 +1658,7 @@ void MultiClassNMSInferMeta(const MetaTensor& bboxes,
            "The 2nd dimension of Input(BBoxes) must be equal to "
            "last dimension of Input(Scores), which represents the "
            "predicted bboxes."
-            "But received box_dims[1](%s) != socre_dims[2](%s)",
+            "But received box_dims[1](%s) != score_dims[2](%s)",
            box_dims[1],
            score_dims[2]));
  } else {

2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/dense_tensor_iterator.cc
@@ -204,7 +204,7 @@ void DenseTensorIterator::set_output_raw_strided(int64_t output_idx,
    }
    op.current_dtype = op.target_dtype;
  } else if (op.will_resize) {
-    PADDLE_THROW(common::errors::Fatal("Opreator Reize not Implemented!"));
+    PADDLE_THROW(common::errors::Fatal("Operator Resize not Implemented!"));
  }
}

2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/fused_gemm_epilogue.h
@@ -461,7 +461,7 @@ void ComputeFusedGemmEpilogueForward(const phi::GPUContext& dev_ctx,
                                       sizeof(bias_data)));

  if (enable_auxiliary && activation != "none") {
-    // Note (Ming Huang): The initialization of ReseveSpace is happened in the
+    // Note (Ming Huang): The initialization of ReserveSpace is happened in the
    // dev_ctx.Alloc. Therefore, we set real date type up here.
    if (activation == "relu") {
      phi::DataType rs_type = phi::DataType::BOOL;

8 changes: 4 additions & 4 deletions test/ir/pir/test_symbol_overload.py
@@ -36,7 +36,7 @@ def forward(self, x, y):
        return res


-class SimbolNet(nn.Layer):
+class SymbolNet(nn.Layer):
    def __init__(self):
        super().__init__()

@@ -61,7 +61,7 @@ def forward(self, x, y):
        return z1, z2, z3, z4


-class SimbolCompareNet(nn.Layer):
+class SymbolCompareNet(nn.Layer):
    def __init__(self):
        super().__init__()

@@ -106,7 +106,7 @@ def base_net(self):
    def symbol_net(self):
        main_program = paddle.static.Program()
        with paddle.static.program_guard(main_program):
-            net = SimbolNet()
+            net = SymbolNet()
            x = paddle.static.data('x', self.shape_x, dtype='float32')
            y = paddle.static.data('y', self.shape_y, dtype='float32')
            x.stop_gradient = False
@@ -164,7 +164,7 @@ def base_net(self):
    def symbol_net(self):
        main_program = paddle.static.Program()
        with paddle.static.program_guard(main_program):
-            net = SimbolCompareNet()
+            net = SymbolCompareNet()
            x = paddle.static.data('x', self.shape_x, dtype='float32')
            y = paddle.static.data('y', self.shape_y, dtype='float32')

4 changes: 2 additions & 2 deletions test/legacy_test/test_audio_backend.py
@@ -22,9 +22,9 @@

class TestAudioBackends(unittest.TestCase):
    def setUp(self):
-        self.initParmas()
+        self.initParams()

-    def initParmas(self):
+    def initParams(self):
        def get_wav_data(dtype: str, num_channels: int, num_frames: int):
            dtype_ = getattr(paddle, dtype)
            base = paddle.linspace(-1.0, 1.0, num_frames, dtype=dtype_) * 0.1

4 changes: 2 additions & 2 deletions test/legacy_test/test_audio_functions.py
@@ -30,9 +30,9 @@ def parameterize(*params):
class TestAudioFunctions(unittest.TestCase):
    def setUp(self):
        paddle.disable_static()
-        self.initParmas()
+        self.initParams()

-    def initParmas(self):
+    def initParams(self):
        def get_wav_data(dtype: str, num_channels: int, num_frames: int):
            dtype_ = getattr(paddle, dtype)
            base = paddle.linspace(-1.0, 1.0, num_frames, dtype=dtype_) * 0.1

4 changes: 2 additions & 2 deletions test/legacy_test/test_audio_logmel_feature.py
@@ -29,9 +29,9 @@ def parameterize(*params):

class TestFeatures(unittest.TestCase):
    def setUp(self):
-        self.initParmas()
+        self.initParams()

-    def initParmas(self):
+    def initParams(self):
        def get_wav_data(dtype: str, num_channels: int, num_frames: int):
            dtype_ = getattr(paddle, dtype)
            base = paddle.linspace(-1.0, 1.0, num_frames, dtype=dtype_) * 0.1

4 changes: 2 additions & 2 deletions test/legacy_test/test_audio_mel_feature.py
@@ -28,9 +28,9 @@ def parameterize(*params):

class TestFeatures(unittest.TestCase):
    def setUp(self):
-        self.initParmas()
+        self.initParams()

-    def initParmas(self):
+    def initParams(self):
        def get_wav_data(dtype: str, num_channels: int, num_frames: int):
            dtype_ = getattr(paddle, dtype)
            base = paddle.linspace(-1.0, 1.0, num_frames, dtype=dtype_) * 0.1

2 changes: 1 addition & 1 deletion test/xpu/test_clip_op_xpu.py
@@ -266,7 +266,7 @@ def _executed_api(self, x, min=None, max=None):

support_types = get_xpu_op_support_types('clip')
for stype in support_types:
-    # TODO(lilujia): disable int32 and int64 test temporarily, as xdnn not support corresponding resuce_mean
+    # TODO(lilujia): disable int32 and int64 test temporarily, as xdnn not support corresponding reduce_mean
    if stype in ["int32", "int64"]:
        continue
    create_test_class(globals(), XPUTestClipOp, stype)