
Commit

Fix
co63oc committed Jan 22, 2025
1 parent 0dc0c49 commit b5cc8e3
Showing 10 changed files with 20 additions and 19 deletions.
6 changes: 3 additions & 3 deletions paddle/fluid/framework/ir/elementwiseadd_transpose_pass.cc
@@ -132,7 +132,7 @@ int ElementwiseAddTransposeFusePass::ApplyEleTransPattern(
}
if (!reshape->Op()->HasAttr("shape")) {
VLOG(1) << "reshape op in elementwise_add_transpose fusion do not found "
"shape attr, the fusion will be stoped.";
"shape attr, the fusion will be stopped.";
return;
}
std::vector<int> shape_attr =
@@ -145,7 +145,7 @@ int ElementwiseAddTransposeFusePass::ApplyEleTransPattern(
"in elementwise_add_transpose, "
"currently, the elementwiseadd transpose pass only support "
"reshape bay shape attr rather than shape tensor."
"Therefore, the fusion will be stoped.";
"Therefore, the fusion will be stopped.";
return;
}
if (shape_attr[3] % 8 != 0) {
@@ -154,7 +154,7 @@ int ElementwiseAddTransposeFusePass::ApplyEleTransPattern(
"in elementwise_add_transpose, "
"currently, the elementwiseadd transpose pass only support "
"channel size mod 8 == 0 for khwc8 trt format"
"Therefore, the fusion will be stoped.";
"Therefore, the fusion will be stopped.";
return;
}
std::unordered_set<const Node *> del_node_set;
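
The log messages above spell out this pass's two preconditions: the reshape op must carry a static "shape" attribute (a shape tensor is not supported), and the channel dimension at index 3 must be divisible by 8 for the kHWC8 TensorRT format. A toy restatement of the check in Python, with purely illustrative values:

def fusion_allowed(shape_attr):
    # No static "shape" attribute: the fusion is stopped.
    if not shape_attr:
        return False
    # kHWC8 requires channel size (index 3) % 8 == 0.
    return shape_attr[3] % 8 == 0

print(fusion_allowed([0, 64, 64, 160]))  # True: 160 % 8 == 0
print(fusion_allowed([0, 64, 64, 100]))  # False: the fusion is stopped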
@@ -40,7 +40,7 @@ MultiTransformerLayerPattern::operator()(bool enable_int8,

std::unordered_map<std::string, std::string> node_reprs;

-  // x0 and src_mask is unqiue input of subgraph
+  // x0 and src_mask is unique input of subgraph
auto* x0 = pattern->NewNode(x0_repr());
x0->assert_is_op_input(fused_multi_transformer_name, "X")->AsInput();
auto* src_mask = pattern->NewNode(src_mask_repr());
4 changes: 2 additions & 2 deletions paddle/fluid/framework/new_executor/standalone_executor.cc
@@ -158,10 +158,10 @@ StandaloneExecutor::StandaloneExecutor(const phi::Place& place,
true,
common::errors::InvalidArgument(
"When using pipeline strategy in auto "
"prarallelism with new executor, "
"parallelism with new executor, "
"the backward subprogram must be built in real "
"static build mode, but it can not "
"be staticly built in this case. You can "
"be statically built in this case. You can "
"enable 'GLOG_v=1' to obtain log information."));
}
}
4 changes: 2 additions & 2 deletions paddle/fluid/imperative/reducer.cc
@@ -792,9 +792,9 @@ void Reducer::MarkVarReady(const size_t var_index, const bool is_used_var) {
"The sparse parameter[%d][%s] should have gradient. "
"Currently, DataParallel does not support sparse "
"parameters without generating gradients during training. "
"For example, if is_sparese=True is used in Embedding, "
"For example, if is_sparse=True is used in Embedding, "
"the current step of this parameter cannot generate gradient "
"because of stop_gradient/detatch, where error will occur.",
"because of stop_gradient/detach, where error will occur.",
var_index,
vars_[var_index]->Name()));
auto var_base = vars_[var_index]->GradVarBase();
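
The corrected message describes a concrete failure mode: a sparse parameter that produces no gradient in a given step. A minimal, hypothetical sketch of that situation (the module, sizes, and inputs are illustrative, and a launched distributed run is assumed):

import paddle
import paddle.distributed as dist

dist.init_parallel_env()

class Net(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        # sparse=True (is_sparse=True in the legacy API) requests
        # sparse gradients for the embedding table.
        self.emb = paddle.nn.Embedding(100, 16, sparse=True)

    def forward(self, ids):
        out = self.emb(ids)
        # detach() cuts the autograd path, so the sparse parameter
        # receives no gradient this step.
        return out.detach().sum()

model = paddle.DataParallel(Net())
loss = model(paddle.to_tensor([1, 2, 3]))
loss.backward()  # DataParallel reports the error quoted above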
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/square_op.cc
@@ -23,7 +23,7 @@ class SquareOpConverter : public OpConverter {
const framework::Scope& scope,
bool test_mode) override {
framework::OpDesc op_desc(op, nullptr);
-    VLOG(3) << "convert a sqaure op to tensorrt layer ";
+    VLOG(3) << "convert a square op to tensorrt layer ";
nvinfer1::ITensor* input_tensor =
engine_->GetITensor(op_desc.Input("X")[0]);

@@ -42,7 +42,7 @@ class IR_API ConstraintsManager {
struct Range {
std::int64_t min;
std::int64_t max;
-    // TODO(Hongqing-work): Subsitute INT32_MAX with a more meaningful value.
+    // TODO(Hongqing-work): Substitute INT32_MAX with a more meaningful value.
Range() : min(1), max(INT32_MAX) {}
Range(int min_val, int max_val) : min(min_val), max(max_val) {}
};
2 changes: 1 addition & 1 deletion paddle/pir/src/core/operation.cc
@@ -266,7 +266,7 @@ Operation::Operation(const AttributeMap &attributes,
id_(GenerateId()) {}

///
-/// \brief op ouput related public interfaces implementation
+/// \brief op output related public interfaces implementation
///
std::vector<Value> Operation::results() const {
std::vector<Value> res;
@@ -190,7 +190,7 @@ void CheckInferSymWithInferMeta(
<< " [id:" << op->id() << "] "
<< " carefully! "
<< "shape[" << i
<< "] of infer_sym_shape shoule be int64_t NOT a symbol!";
<< "] of infer_sym_shape should be int64_t NOT a symbol!";
LOG(ERROR) << print_stream.str();
continue;
}
13 changes: 7 additions & 6 deletions paddle/pir/src/dialect/shape/utils/constraints_manager.cc
@@ -317,12 +317,13 @@ void ConstraintsManager::SubstituteInConstraint(const DimExpr& origin,
const DimExpr& substituted_dim_expr =
SubstituteDimExpr(it->first, substitution_pattern);
if (substituted_dim_expr != it->first) {
-      PADDLE_ENFORCE_EQ(substituted_dim_expr.isa<std::string>() ||
-                            substituted_dim_expr.isa<std::int64_t>(),
-                        true,
-                        common::errors::InvalidArgument(
-                            "Bounded input DimExpr can only be subsituted with "
-                            "a string or an integer."));
+      PADDLE_ENFORCE_EQ(
+          substituted_dim_expr.isa<std::string>() ||
+              substituted_dim_expr.isa<std::int64_t>(),
+          true,
+          common::errors::InvalidArgument(
+              "Bounded input DimExpr can only be substituted with "
+              "a string or an integer."));
if (substituted_dim_expr.isa<std::int64_t>()) {
std::int64_t substituted_value =
substituted_dim_expr.Get<std::int64_t>();
2 changes: 1 addition & 1 deletion python/paddle/jit/dy2static/program_translator.py
@@ -1059,7 +1059,7 @@ def concrete_program_specify_input_spec(
)
if cached_program_len > 1:
logging_utils.warn(
f"Current {self._function_spec} has more than one cached programs: {cached_program_len}, the last traced progam will be return by default."
f"Current {self._function_spec} has more than one cached programs: {cached_program_len}, the last traced program will be return by default."
)

cache_key = self._program_cache._recent_cache_key
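
For context, this warning fires when a single dygraph-to-static function has accumulated more than one traced program. A hypothetical sketch of how that happens (shapes are illustrative):

import paddle

@paddle.jit.to_static
def double(x):
    return x * 2

double(paddle.rand([4, 8]))     # traces and caches program #1
double(paddle.rand([2, 8, 3]))  # new input signature -> program #2
# Retrieving the concrete program now emits the warning above and
# returns the most recently traced program.
prog = double.concrete_program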
