[CodeStyle][Typos][F-[12-17],F-[19-24],F-[26-28]] Fix typo(`Flattend`,`flattend`,`flattern`,`Flattern`,`filpped`,`flaot`,`follwed`,`folowing`,`formater`,`formating`,`foramt`,`formt`,`formate`,`forwad`,`forwrad`,`forword`,`founf`,`framwork`,`frequence`,`fron`,`fullfill`) (#70646)

---------

Co-authored-by: Nyakku Shigure <[email protected]>
yinfan98 and SigureMo authored Jan 8, 2025
1 parent 80c376f commit fbc9a6a
Showing 28 changed files with 138 additions and 158 deletions.
4 changes: 2 additions & 2 deletions CONTRIBUTING.md
@@ -39,7 +39,7 @@ PaddlePaddle uses this [Git branching model](http://nvie.com/posts/a-successful-
pre-commit install
```

Our pre-commit configuration requires clang-format 3.8 for auto-formating C/C++ code and yapf for Python.
Our pre-commit configuration requires clang-format 3.8 for auto-formatting C/C++ code and yapf for Python.

Once installed, `pre-commit` checks the style of code and documentation in every commit. We will see something like the following when you run `git commit`:

@@ -52,7 +52,7 @@ PaddlePaddle uses this [Git branching model](http://nvie.com/posts/a-successful-
Check for broken symlinks................................................Passed
Detect Private Key...................................(no files to check)Skipped
Fix End of Files.....................................(no files to check)Skipped
clang-formater.......................................(no files to check)Skipped
clang-format.........................................(no files to check)Skipped
[my-cool-stuff c703c041] add test file
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 233
21 changes: 0 additions & 21 deletions _typos.toml
@@ -46,27 +46,6 @@ dobule = 'dobule'
Dowloading = 'Dowloading'
downsteram = 'downsteram'
fetchs = 'fetchs'
Flattend = 'Flattend'
flattend = 'flattend'
flattern = 'flattern'
Flattern = 'Flattern'
filpped = 'filpped'
flaot = 'flaot'
follwed = 'follwed'
folowing = 'folowing'
formater = 'formater'
formating = 'formating'
foramt = 'foramt'
formate = 'formate'
formt = 'formt'
forwrad = 'forwrad'
forwad = 'forwad'
forword = 'forword'
founf = 'founf'
framwork = 'framwork'
frequence = 'frequence'
fron = 'fron'
fullfill = 'fullfill'
Indexs = 'Indexs'
indexs = 'indexs'
indiates = 'indiates'
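(For context: these appear to be entries in the repository's typos allowlist, `_typos.toml`; each `word = 'word'` line tells the `typos` checker to accept that spelling. Deleting the entries re-enables detection for those words now that the occurrences have been fixed in the files changed below.)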
2 changes: 1 addition & 1 deletion paddle/cinn/common/ir_util.h
@@ -191,7 +191,7 @@ inline void UnpackReduction(const ir::IndexExpr &expr, FLeaf fleaf) {
}

/*!
* \brief Flattern the expression into a vector of expressions splited by `Add`
* \brief Flatten the expression into a vector of expressions splited by `Add`
* or `Mul`.
*
* For example (Add):
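For context on the comment being fixed: flattening an expression by `Add` (or `Mul`) means collecting the leaf operands of nested applications of that operator into a vector. A toy C++ sketch of the idea, using a hypothetical node type rather than CINN's `ir::IndexExpr`:

```cpp
#include <cstdio>
#include <memory>
#include <vector>

// Hypothetical toy expression node: a leaf value, or an Add of two children.
struct Expr {
  int value = 0;                   // used when lhs/rhs are null (leaf)
  std::shared_ptr<Expr> lhs, rhs;  // non-null for an Add node
};

std::shared_ptr<Expr> Leaf(int v) {
  auto e = std::make_shared<Expr>();
  e->value = v;
  return e;
}

std::shared_ptr<Expr> Add(std::shared_ptr<Expr> a, std::shared_ptr<Expr> b) {
  auto e = std::make_shared<Expr>();
  e->lhs = std::move(a);
  e->rhs = std::move(b);
  return e;
}

// Flatten nested Adds into a vector of leaves: (a + b) + c -> {a, b, c}.
void FlattenAdd(const std::shared_ptr<Expr>& e, std::vector<int>* out) {
  if (e->lhs && e->rhs) {
    FlattenAdd(e->lhs, out);
    FlattenAdd(e->rhs, out);
  } else {
    out->push_back(e->value);
  }
}

int main() {
  std::vector<int> leaves;
  FlattenAdd(Add(Add(Leaf(1), Leaf(2)), Leaf(3)), &leaves);
  for (int v : leaves) printf("%d ", v);  // prints: 1 2 3
  printf("\n");
  return 0;
}
```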
2 changes: 1 addition & 1 deletion paddle/cinn/runtime/cuda/cuda_util.cc
@@ -1742,7 +1742,7 @@ void cinn_call_cholesky_nvgpu(void *v_args,
cinn_buffer_t *x = args[0].operator cinn_buffer_t *();
cinn_buffer_t *out = args[1].operator cinn_buffer_t *();
// In cuSOLVER, dense matrix stores in COL_MAJOR, thus FILL_MODE needs to be
// filpped. See also:
// flipped. See also:
// https://docs.nvidia.com/cuda/cusolver/index.html#matrix-dense-format
cublasFillMode_t uplo =
upper ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER;
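A brief aside on the flip mentioned in the comment: a triangular matrix stored row-major occupies the same bytes as its transpose stored column-major, so a column-major library such as cuSOLVER must be told the opposite fill mode. A standalone C++ illustration (not part of the code above):

```cpp
#include <cstdio>

int main() {
  // Row-major upper-triangular 2x2 matrix {{1, 2}, {0, 3}}.
  float row_major[4] = {1.f, 2.f, 0.f, 3.f};
  // Reading the same buffer as column-major yields {{1, 0}, {2, 3}},
  // i.e. the lower-triangular transpose - hence the flipped FILL_MODE.
  printf("col-major view: [[%g, %g], [%g, %g]]\n",
         row_major[0], row_major[2], row_major[1], row_major[3]);
  return 0;
}
```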
2 changes: 1 addition & 1 deletion paddle/fluid/framework/data_transform.cc
@@ -173,7 +173,7 @@ phi::GetKernelTypeForVarContext BuildGetKernelTypeForVarContext(
if (has_infer_varkernel_fn) {
for (auto &attr : fluid_attrs) {
switch (attr.second.index()) {
case 3: // string type in framwork::Attribute
case 3: // string type in framework::Attribute
(*phi_attrs)[attr.first] = PADDLE_GET_CONST(std::string, attr.second);
break;
default:
2 changes: 1 addition & 1 deletion paddle/fluid/framework/new_executor/pir_interpreter.cc
@@ -979,7 +979,7 @@ void PirInterpreter::BuildInstruction() {
}

std::string PirInterpreter::DebugInstructions() {
// log formate: var[101] = pd_op.relu(var[100]) or for inplace op var[100] =
// log format: var[101] = pd_op.relu(var[100]) or for inplace op var[100] =
// pd_op.relu_(var[100])
std::stringstream ss;
ss << "{outputs}"
4 changes: 2 additions & 2 deletions paddle/fluid/inference/tensorrt/op_teller.cc
@@ -3467,9 +3467,9 @@ struct CustomGenericPluginTeller : public Teller {
"SetTrtInferShapeFn.";
return false;
}
auto& trt_supports_formate_config =
auto& trt_supports_format_config =
OpMetaInfoHelper::GetTrtSupportsFormatConfig(op_info);
if (trt_supports_formate_config.empty()) {
if (trt_supports_format_config.empty()) {
VLOG(3)
<< op_type
<< " has no trt supportsFormatCombination config. Please set by "
@@ -311,9 +311,9 @@ bool CustomGenericPlugin::supportsFormatCombination(
auto& op_meta_info_map = OpMetaInfoMap::Instance();
const auto& meta_info_map = op_meta_info_map.GetMap();
auto& op_info = meta_info_map.at(op_desc_.Type()).front();
auto& supports_formate_config =
auto& supports_format_config =
OpMetaInfoHelper::GetTrtSupportsFormatConfig(op_info);
PADDLE_ENFORCE_NE(supports_formate_config.empty(),
PADDLE_ENFORCE_NE(supports_format_config.empty(),
true,
common::errors::InvalidArgument(
"The %s op has no tensorrt plugin "
@@ -325,7 +325,7 @@ bool CustomGenericPlugin::supportsFormatCombination(
size_t output_num = OpMetaInfoHelper::GetOutputs(op_info).size();
std::vector<std::vector<std::pair<std::string, std::string>>>
format_combinations;
for (auto& config : supports_formate_config) {
for (auto& config : supports_format_config) {
auto format_combination = parseConfig(op_desc_.Type(), config);
PADDLE_ENFORCE_EQ(input_num + output_num,
format_combination.size(),
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.cu
@@ -77,7 +77,7 @@ __device__ half do_tanh<half>(half a) {
return __float2half(tmp);
}

// the kernel below is not aligned with fluid fp32 forwrad ones, use it for
// the kernel below is not aligned with fluid fp32 forward ones, use it for
// fp16.
template <typename T, unsigned TPB>
__global__ void no_exact_gelu_kernel(
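For readers unfamiliar with the kernel referenced above: fp16 GELU kernels commonly use a tanh-based approximation rather than the exact erf form. A plain C++ sketch of that standard formula (an illustration of the general technique, not the plugin's actual kernel):

```cpp
#include <cmath>

// Standard tanh-based GELU approximation, the usual choice for fp16 kernels:
// gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
float GeluTanhApprox(float x) {
  const float kAlpha = 0.7978845608028654f;  // sqrt(2 / pi)
  const float kBeta = 0.044715f;
  return 0.5f * x * (1.0f + std::tanh(kAlpha * (x + kBeta * x * x * x)));
}
```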
@@ -2631,7 +2631,7 @@ bool GroupNormOpInferSymbolicShape(
channel_idx = 1;
} else {
PADDLE_THROW(common::errors::Unimplemented(
"GroupNorm only suport NHWC and NCHW data formt"));
"GroupNorm only suport NHWC and NCHW data format"));
}

symbol::DimExpr channel_dim = x_shape.shape()[channel_idx];
18 changes: 9 additions & 9 deletions paddle/phi/kernels/cpu/unique_kernel.cc
@@ -83,15 +83,15 @@ void UniqueRawKernel(const Context& context,
if (axis.empty()) {
phi::VisitDataTypeTiny(
dtype,
phi::funcs::UniqueFlattendTensorFunctor<Context, T>(context,
x,
out,
indices,
index,
counts,
return_index,
return_inverse,
return_counts));
phi::funcs::UniqueFlattenedTensorFunctor<Context, T>(context,
x,
out,
indices,
index,
counts,
return_index,
return_inverse,
return_counts));
} else {
int axis_value = axis[0];
axis_value = (axis_value == -1) ? (x.dims().size() - 1) : axis_value;
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/math_cuda_utils.h
@@ -298,7 +298,7 @@ __inline__ __device__ T PartialWarpReduceMin(T val, warp_mask_t lane_mask) {
T warp_val = __shfl_sync(lane_mask, val, 0, warpSize);
#else
T warp_val = __shfl(
val, 0, warpSize); // To fullfill the data in each thread of this warp.
val, 0, warpSize); // To fulfill the data in each thread of this warp.
#endif
warp_val = val;

56 changes: 28 additions & 28 deletions paddle/phi/kernels/funcs/unique_functor.h
@@ -130,15 +130,15 @@ static bool Equal(const DenseTensor& a, const DenseTensor& b) {
}

template <typename Context, typename InT, typename IndexT>
static void UniqueFlattendTensor(const Context& context,
const DenseTensor& in,
DenseTensor* out,
DenseTensor* indices,
DenseTensor* index,
DenseTensor* count,
bool return_index,
bool return_inverse,
bool return_counts) {
static void UniqueFlattenedTensor(const Context& context,
const DenseTensor& in,
DenseTensor* out,
DenseTensor* indices,
DenseTensor* index,
DenseTensor* count,
bool return_index,
bool return_inverse,
bool return_counts) {
const InT* in_data = in.data<InT>();
std::set<InT> unique(in_data, in_data + in.numel());
out->Resize(common::make_ddim({static_cast<int64_t>(unique.size())}));
@@ -327,7 +327,7 @@ static void UniqueDim(const Context& context,
}

template <typename Context, typename InT>
struct UniqueFlattendTensorFunctor {
struct UniqueFlattenedTensorFunctor {
const Context& ctx_; /* */
const DenseTensor& in_;
DenseTensor* out_;
@@ -338,15 +338,15 @@ struct UniqueFlattendTensorFunctor {
const bool return_inverse_;
const bool return_counts_;

UniqueFlattendTensorFunctor(const Context& context,
const DenseTensor& in,
DenseTensor* out,
DenseTensor* indices,
DenseTensor* index,
DenseTensor* count,
bool return_index,
bool return_inverse,
bool return_counts)
UniqueFlattenedTensorFunctor(const Context& context,
const DenseTensor& in,
DenseTensor* out,
DenseTensor* indices,
DenseTensor* index,
DenseTensor* count,
bool return_index,
bool return_inverse,
bool return_counts)
: ctx_(context),
in_(in),
out_(out),
@@ -359,15 +359,15 @@ struct UniqueFlattendTensorFunctor {

template <typename IndexT>
void apply() const {
UniqueFlattendTensor<Context, InT, IndexT>(ctx_,
in_,
out_,
indices_,
index_,
count_,
return_index_,
return_inverse_,
return_counts_);
UniqueFlattenedTensor<Context, InT, IndexT>(ctx_,
in_,
out_,
indices_,
index_,
count_,
return_index_,
return_inverse_,
return_counts_);
}
};

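For orientation, the renamed `UniqueFlattenedTensor` path computes the sorted unique values of the flattened input plus, when requested, index, inverse-index, and count outputs. A minimal standalone C++ sketch of those semantics (illustrative only, not the PHI implementation):

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Toy illustration of "flattened unique": sorted unique values plus the
// inverse index mapping every original element back to its unique slot.
int main() {
  std::vector<int> in = {3, 1, 3, 2, 1};

  std::vector<int> out(in);
  std::sort(out.begin(), out.end());
  out.erase(std::unique(out.begin(), out.end()), out.end());  // {1, 2, 3}

  std::vector<int> inverse;  // position of each input element in `out`
  for (int v : in) {
    inverse.push_back(static_cast<int>(
        std::lower_bound(out.begin(), out.end(), v) - out.begin()));
  }
  // inverse == {2, 0, 2, 1, 0}
  for (int i : inverse) printf("%d ", i);
  printf("\n");
  return 0;
}
```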
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/rms_norm_funcs.h
@@ -14,7 +14,7 @@ limitations under the License. */

/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */

/*This code is copied fron NVIDIA apex:
/*This code is copied from NVIDIA apex:
* https://github.com/NVIDIA/apex
* with minor changes. */

2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/rms_norm_grad_kernel.cu
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */

/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */
/*This code is copied fron NVIDIA apex:
/*This code is copied from NVIDIA apex:
* https://github.com/NVIDIA/apex
* with minor changes. */

4 changes: 2 additions & 2 deletions paddle/phi/kernels/gpu/unique_consecutive_functor.h
@@ -32,7 +32,7 @@

namespace phi {

// The core logic of computing Unique Consecutive for a flattend Tensor
// The core logic of computing Unique Consecutive for a flattened Tensor
template <typename Context,
typename InT,
typename IndexT,
@@ -113,7 +113,7 @@ static void UniqueConsecutiveFlattenedCUDATensor(const Context& context,
}
}

// functor for processing a flattend Tensor
// functor for processing a flattened Tensor
template <typename Context, typename InT>
struct UniqueConsecutiveFlattenedCUDAFunctor {
const Context& ctx_;
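Likewise, the "unique consecutive" variant touched above collapses only adjacent duplicates, unlike full unique. That matches the behavior of `std::unique` on an unsorted range, as this small illustrative sketch shows (not the CUDA implementation):

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// "Unique consecutive" keeps one element per run of adjacent repeats.
int main() {
  std::vector<int> in = {1, 1, 2, 2, 1, 3, 3};
  std::vector<int> out(in);
  out.erase(std::unique(out.begin(), out.end()), out.end());
  for (int v : out) printf("%d ", v);  // prints: 1 2 1 3
  printf("\n");
  return 0;
}
```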