Commit

Format

lockshaw committed Feb 16, 2025
1 parent 32e64d1 commit 3d9dee4

Showing 63 changed files with 1,346 additions and 1,105 deletions.
2 changes: 1 addition & 1 deletion lib/compiler/include/compiler/graph_optimize_state.h
@@ -7,7 +7,7 @@ namespace FlexFlow {

struct GraphOptimizeState {
explicit GraphOptimizeState(GraphOptimizeResult const &graph_optimize_result,
float runtime);
float runtime);

GraphOptimizeResult graph_optimize_result;
float runtime;
6 changes: 4 additions & 2 deletions lib/compiler/src/compiler/graph_optimize_state.cc
@@ -1,6 +1,6 @@
#include "compiler/graph_optimize_state.h"
#include "pcg/parallel_computation_graph/parallel_tensor_guid_t.h"
#include "compiler/graph_optimize_result.h"
#include "pcg/parallel_computation_graph/parallel_tensor_guid_t.h"

namespace FlexFlow {

@@ -56,7 +56,9 @@ bool GraphOptimizeState::operator<(GraphOptimizeState const &other) const {
}

std::string format_as(GraphOptimizeState const &st) {

return fmt::format("<GraphOptimizeState graph_optimize_result={} runtime={}>", st.graph_optimize_result, st.runtime);
return fmt::format("<GraphOptimizeState graph_optimize_result={} runtime={}>",
st.graph_optimize_result,
st.runtime);

}

std::ostream &operator<<(std::ostream &s, GraphOptimizeState const &st) {
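For context on the reformatting above: format_as builds its string with fmt::format, and operator<< is typically defined in terms of the same formatting helper. A minimal standalone sketch of that idiom, using a hypothetical Point struct rather than FlexFlow's GraphOptimizeState, might look like this:

```cpp
#include <fmt/format.h>

#include <iostream>
#include <ostream>
#include <string>

// Hypothetical stand-in for a FlexFlow value type.
struct Point {
  int x;
  int y;
};

// Build a human-readable representation with fmt, mirroring the shape of
// format_as(GraphOptimizeState const &) in the hunk above.
std::string format_as(Point const &p) {
  return fmt::format("<Point x={} y={}>", p.x, p.y);
}

// Stream operator expressed in terms of the formatting helper.
std::ostream &operator<<(std::ostream &s, Point const &p) {
  return s << format_as(p);
}

int main() {
  std::cout << Point{1, 2} << "\n"; // prints <Point x=1 y=2>
}
```

The change in the hunk itself is cosmetic: the fmt::format arguments are broken one per line to satisfy the formatter's line-length limit.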
@@ -1,10 +1,10 @@
#include "compiler/series_parallel/pcg/get_pcg_series_parallel_decomposition.h"
#include "op-attrs/pcg_operator_attrs.h"
#include "pcg/parallel_computation_graph/parallel_computation_graph.h"
#include "utils/containers/get_only.h"
#include "utils/graph/digraph/algorithms/materialize_digraph_view.h"
#include "utils/graph/instances/adjacency_digraph.h"
#include "utils/graph/series_parallel/get_series_parallel_decomposition.h"
#include "op-attrs/pcg_operator_attrs.h"

namespace FlexFlow {

Expand All @@ -19,7 +19,7 @@ std::optional<SeriesParallelDecomposition>
}
}

auto layer_is_weight_or_input = [&](parallel_layer_guid_t const &l) {
auto layer_is_weight_or_input = [&](parallel_layer_guid_t const &l) {
PCGOperatorAttrs op_attrs = get_parallel_layer_attrs(pcg, l).op_attrs;
return op_attrs.has<WeightAttrs>() || op_attrs.has<InputAttrs>();
};
Expand All @@ -29,20 +29,24 @@ std::optional<SeriesParallelDecomposition>
return is_parallel_op(op_attrs);
};

std::function<parallel_layer_guid_t(parallel_layer_guid_t const &)> follow_to_last_parallel_op
= [&](parallel_layer_guid_t const &starting_point) -> parallel_layer_guid_t {
std::function<parallel_layer_guid_t(parallel_layer_guid_t const &)>
follow_to_last_parallel_op =
[&](parallel_layer_guid_t const &starting_point)
-> parallel_layer_guid_t {
assert(layer_is_weight_or_input(starting_point) ||
layer_is_parallel_op(starting_point));

assert (layer_is_weight_or_input(starting_point) || layer_is_parallel_op(starting_point));

std::unordered_set<parallel_layer_guid_t> successors = get_successors(pcg, starting_point);
std::unordered_set<parallel_layer_guid_t> successors =
get_successors(pcg, starting_point);

if (successors.size() != 1) {
return starting_point;
}

parallel_layer_guid_t successor = get_only(get_successors(pcg, starting_point));
parallel_layer_guid_t successor =
get_only(get_successors(pcg, starting_point));

assert (!layer_is_weight_or_input(successor));
assert(!layer_is_weight_or_input(successor));
if (layer_is_parallel_op(successor)) {
return follow_to_last_parallel_op(successor);
} else {
Expand All @@ -54,7 +58,7 @@ std::optional<SeriesParallelDecomposition>
std::unordered_set<parallel_layer_guid_t> weight_and_input_layers =
filter(get_parallel_layers(pcg), layer_is_weight_or_input);

std::unordered_set<parallel_layer_guid_t> par_chain_endpoints =
std::unordered_set<parallel_layer_guid_t> par_chain_endpoints =
transform(weight_and_input_layers, follow_to_last_parallel_op);

std::unordered_set<parallel_layer_guid_t> par_chain_endpoint_successors =
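The follow_to_last_parallel_op lambda reformatted above starts from a weight or input layer and keeps walking forward while the layer has exactly one successor and that successor is a parallel op; it stops at a branch point or at the first non-parallel layer. A simplified sketch of that chain-following recursion over a toy adjacency-map digraph (hypothetical integer node IDs rather than the real parallel_layer_guid_t and PCG types) could look like:

```cpp
#include <cassert>
#include <functional>
#include <unordered_map>
#include <unordered_set>

using Node = int;
using Digraph = std::unordered_map<Node, std::unordered_set<Node>>;

// Follow a node's unique-successor chain while the successor satisfies
// is_parallel_op, returning the last node reached. This mirrors the shape of
// follow_to_last_parallel_op above, but over a plain adjacency map.
Node follow_chain(Digraph const &g,
                  Node start,
                  std::function<bool(Node)> const &is_parallel_op) {
  auto it = g.find(start);
  if (it == g.end() || it->second.size() != 1) {
    return start; // chain ends at a sink or at a branch point
  }
  Node successor = *it->second.begin();
  if (is_parallel_op(successor)) {
    return follow_chain(g, successor, is_parallel_op);
  }
  return start; // the next layer is not a parallel op, so the chain stops here
}

int main() {
  // 0 -> 1 -> 2, where 1 and 2 count as parallel ops and 2 has no successors.
  Digraph g = {{0, {1}}, {1, {2}}, {2, {}}};
  auto is_parallel_op = [](Node n) { return n == 1 || n == 2; };
  assert(follow_chain(g, 0, is_parallel_op) == 2);
}
```

In the actual code the recursion is held in a std::function so the lambda can call itself, which is why the reformatted declaration above ends up split across several lines.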
4 changes: 3 additions & 1 deletion lib/compiler/test/src/compiler/graph_optimize_result.cc
@@ -3,7 +3,9 @@
namespace FlexFlow {

std::string format_as(GraphOptimizeResult const &r) {
return fmt::format("<GraphOptimizeResult\npcg={}\nmachine_mapping={}>", as_dot(r.pcg), r.machine_mapping);
return fmt::format("<GraphOptimizeResult\npcg={}\nmachine_mapping={}>",
as_dot(r.pcg),
r.machine_mapping);
}

std::ostream &operator<<(std::ostream &s, GraphOptimizeResult const &r) {
@@ -27,24 +27,25 @@ TEST_SUITE(FF_TEST_SUITE) {
ParallelComputationGraph pcg = empty_parallel_computation_graph();

TensorShape input_shape = TensorShape{
TensorDims{
FFOrdered<nonnegative_int>{
10_n, 12_n,
TensorDims{
FFOrdered<nonnegative_int>{
10_n,
12_n,
},
},
},
DataType::FLOAT,
DataType::FLOAT,
};

ParallelTensorShape par_input_shape = lift_to_parallel(input_shape);

ParallelLayerAttrs partition_attrs = ParallelLayerAttrs{
/*op_attrs=*/PCGOperatorAttrs{
RepartitionAttrs{
/*repartition_dim=*/ff_dim_t{0_n},
/*repartition_degree=*/2_n,
/*op_attrs=*/PCGOperatorAttrs{
RepartitionAttrs{
/*repartition_dim=*/ff_dim_t{0_n},
/*repartition_degree=*/2_n,
},
},
},
/*name=*/std::nullopt,
/*name=*/std::nullopt,
};

ParallelLayerAttrs relu_attrs = ParallelLayerAttrs{
@@ -72,19 +73,19 @@ TEST_SUITE(FF_TEST_SUITE) {
SUBCASE("no edges across split") {
ParallelLayerAddedResult input1 = pcg_add_input_layer(pcg, input_shape);
parallel_tensor_guid_t t_input1 = get_only(input1.outputs);
ParallelLayerAddedResult partition_input1 = add_parallel_layer(pcg, partition_attrs, {t_input1}, {});
ParallelLayerAddedResult partition_input1 =
add_parallel_layer(pcg, partition_attrs, {t_input1}, {});

ParallelLayerAddedResult input2 = pcg_add_input_layer(pcg, input_shape);
parallel_tensor_guid_t t_input2 = get_only(input2.outputs);
ParallelLayerAddedResult partition_input2 = add_parallel_layer(pcg, partition_attrs, {t_input2}, {});
ParallelLayerAddedResult partition_input2 =
add_parallel_layer(pcg, partition_attrs, {t_input2}, {});

PCGBinarySeriesSplit split = PCGBinarySeriesSplit{
make_series_split(
make_leaf(input1.parallel_layer),
make_leaf(partition_input1.parallel_layer)),
make_series_split(
make_leaf(input2.parallel_layer),
make_leaf(partition_input2.parallel_layer)),
make_series_split(make_leaf(input1.parallel_layer),
make_leaf(partition_input1.parallel_layer)),
make_series_split(make_leaf(input2.parallel_layer),
make_leaf(partition_input2.parallel_layer)),
};

AbstractedTensorSetMovement result =
Expand All @@ -101,21 +102,21 @@ TEST_SUITE(FF_TEST_SUITE) {
SUBCASE("single edge across split") {
ParallelLayerAddedResult input = pcg_add_input_layer(pcg, input_shape);
parallel_tensor_guid_t t_input = get_only(input.outputs);
ParallelLayerAddedResult partition_input = add_parallel_layer(pcg, partition_attrs, {t_input}, {});
ParallelLayerAddedResult partition_input =
add_parallel_layer(pcg, partition_attrs, {t_input}, {});
parallel_tensor_guid_t t_partition_input = get_only(input.outputs);

ParallelLayerAddedResult layer_1 = add_parallel_layer(
pcg, relu_attrs, {t_partition_input}, {});
ParallelLayerAddedResult layer_1 =
add_parallel_layer(pcg, relu_attrs, {t_partition_input}, {});
parallel_tensor_guid_t t_layer_1 = get_only(layer_1.outputs);
ParallelLayerAddedResult layer_2 = add_parallel_layer(
pcg, relu_attrs, {t_layer_1}, {});
ParallelLayerAddedResult layer_2 =
add_parallel_layer(pcg, relu_attrs, {t_layer_1}, {});

PCGBinarySeriesSplit split = PCGBinarySeriesSplit{
make_series_split(
make_series_split(
make_leaf(input.parallel_layer),
make_leaf(partition_input.parallel_layer)),
make_leaf(layer_1.parallel_layer)),
make_series_split(make_leaf(input.parallel_layer),
make_leaf(partition_input.parallel_layer)),
make_leaf(layer_1.parallel_layer)),
make_leaf(layer_2.parallel_layer),
};

@@ -147,28 +148,25 @@ TEST_SUITE(FF_TEST_SUITE) {
SUBCASE("does not include edges removed by transitive reduction") {
ParallelLayerAddedResult input = pcg_add_input_layer(pcg, input_shape);
parallel_tensor_guid_t t_input = get_only(input.outputs);
ParallelLayerAddedResult partition_input = add_parallel_layer(pcg, partition_attrs, {t_input}, {});
ParallelLayerAddedResult partition_input =
add_parallel_layer(pcg, partition_attrs, {t_input}, {});
parallel_tensor_guid_t t_partition_input = get_only(input.outputs);

ParallelLayerAddedResult layer_1 = add_parallel_layer(
pcg, relu_attrs, {t_partition_input}, {});
ParallelLayerAddedResult layer_1 =
add_parallel_layer(pcg, relu_attrs, {t_partition_input}, {});
parallel_tensor_guid_t t_layer_1 = get_only(layer_1.outputs);

ParallelLayerAddedResult layer_2 = add_parallel_layer(
pcg, relu_attrs, {t_layer_1}, {});
ParallelLayerAddedResult layer_2 =
add_parallel_layer(pcg, relu_attrs, {t_layer_1}, {});
parallel_tensor_guid_t t_layer_2 = get_only(layer_2.outputs);

ParallelLayerAddedResult layer_3 = add_parallel_layer(
pcg,
ew_add_attrs,
{t_layer_1, t_layer_2},
{});
ParallelLayerAddedResult layer_3 =
add_parallel_layer(pcg, ew_add_attrs, {t_layer_1, t_layer_2}, {});

PCGBinarySeriesSplit split = PCGBinarySeriesSplit{
make_series_split(
make_series_split(
make_leaf(input.parallel_layer),
make_leaf(partition_input.parallel_layer)),
make_series_split(make_leaf(input.parallel_layer),
make_leaf(partition_input.parallel_layer)),
make_series_split(make_leaf(layer_1.parallel_layer),
make_leaf(layer_2.parallel_layer))),
make_leaf(layer_3.parallel_layer),
@@ -203,24 +201,25 @@ TEST_SUITE(FF_TEST_SUITE) {
SUBCASE("single tensor, multiple consumers across split") {
ParallelLayerAddedResult input = pcg_add_input_layer(pcg, input_shape);
parallel_tensor_guid_t t_input = get_only(input.outputs);
ParallelLayerAddedResult partition_input = add_parallel_layer(pcg, partition_attrs, {t_input}, {});
ParallelLayerAddedResult partition_input =
add_parallel_layer(pcg, partition_attrs, {t_input}, {});
parallel_tensor_guid_t t_partition_input = get_only(input.outputs);

ParallelLayerAddedResult layer_1 = add_parallel_layer(
pcg, relu_attrs, {t_partition_input}, {});
ParallelLayerAddedResult layer_1 =
add_parallel_layer(pcg, relu_attrs, {t_partition_input}, {});
parallel_tensor_guid_t t_layer_1 = get_only(layer_1.outputs);

ParallelLayerAddedResult layer_2 = add_parallel_layer(
pcg, relu_attrs, {t_layer_1}, {});
ParallelLayerAddedResult layer_2 =
add_parallel_layer(pcg, relu_attrs, {t_layer_1}, {});

ParallelLayerAddedResult layer_3 = add_parallel_layer(
pcg, relu_attrs, {t_layer_1}, {});
ParallelLayerAddedResult layer_3 =
add_parallel_layer(pcg, relu_attrs, {t_layer_1}, {});

PCGBinarySeriesSplit split = PCGBinarySeriesSplit{
make_series_split(
make_series_split(make_leaf(input.parallel_layer),
make_leaf(partition_input.parallel_layer)),
make_leaf(layer_1.parallel_layer)),
make_series_split(make_leaf(input.parallel_layer),
make_leaf(partition_input.parallel_layer)),
make_leaf(layer_1.parallel_layer)),
make_parallel_split(make_leaf(layer_2.parallel_layer),
make_leaf(layer_3.parallel_layer)),
};
@@ -258,17 +257,18 @@ TEST_SUITE(FF_TEST_SUITE) {
SUBCASE("multiple tensors, multiple consumers across split") {
ParallelLayerAddedResult input = pcg_add_input_layer(pcg, input_shape);
parallel_tensor_guid_t t_input = get_only(input.outputs);
ParallelLayerAddedResult partition_input = add_parallel_layer(pcg, partition_attrs, {t_input}, {});
ParallelLayerAddedResult partition_input =
add_parallel_layer(pcg, partition_attrs, {t_input}, {});
parallel_tensor_guid_t t_partition_input = get_only(input.outputs);

ParallelLayerAddedResult layer_1 = add_parallel_layer(
pcg, relu_attrs, {t_partition_input}, {});
ParallelLayerAddedResult layer_1 =
add_parallel_layer(pcg, relu_attrs, {t_partition_input}, {});

ParallelLayerAddedResult layer_2 = add_parallel_layer(
pcg, relu_attrs, {t_partition_input}, {});
ParallelLayerAddedResult layer_2 =
add_parallel_layer(pcg, relu_attrs, {t_partition_input}, {});

ParallelLayerAddedResult layer_3 = add_parallel_layer(
pcg, relu_attrs, {get_only(layer_1.outputs)}, {});
ParallelLayerAddedResult layer_3 =
add_parallel_layer(pcg, relu_attrs, {get_only(layer_1.outputs)}, {});

ParallelLayerAddedResult layer_4 = add_parallel_layer(
pcg,
Expand All @@ -278,9 +278,8 @@ TEST_SUITE(FF_TEST_SUITE) {

PCGBinarySeriesSplit split = PCGBinarySeriesSplit{
make_series_split(
make_series_split(
make_leaf(input.parallel_layer),
make_leaf(partition_input.parallel_layer)),
make_series_split(make_leaf(input.parallel_layer),
make_leaf(partition_input.parallel_layer)),
make_parallel_split(make_leaf(layer_1.parallel_layer),
make_leaf(layer_2.parallel_layer))),
make_parallel_split(make_leaf(layer_3.parallel_layer),
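The tests above assemble PCGBinarySeriesSplit values from nested make_series_split, make_parallel_split, and make_leaf calls. As a rough, self-contained illustration of that kind of binary series-parallel tree (a toy variant-based structure, not FlexFlow's actual types or helpers), one might sketch:

```cpp
#include <memory>
#include <string>
#include <utility>
#include <variant>

// Toy stand-ins for a binary series-parallel tree over named layers.
struct SPNode;
using SPNodePtr = std::shared_ptr<SPNode>;

struct Leaf {
  std::string layer;
};
struct SeriesSplit {
  SPNodePtr pre;
  SPNodePtr post;
};
struct ParallelSplit {
  SPNodePtr lhs;
  SPNodePtr rhs;
};

struct SPNode {
  std::variant<Leaf, SeriesSplit, ParallelSplit> v;
};

SPNodePtr make_leaf(std::string layer) {
  return std::make_shared<SPNode>(SPNode{Leaf{std::move(layer)}});
}
SPNodePtr make_series_split(SPNodePtr pre, SPNodePtr post) {
  return std::make_shared<SPNode>(
      SPNode{SeriesSplit{std::move(pre), std::move(post)}});
}
SPNodePtr make_parallel_split(SPNodePtr lhs, SPNodePtr rhs) {
  return std::make_shared<SPNode>(
      SPNode{ParallelSplit{std::move(lhs), std::move(rhs)}});
}

int main() {
  // Same shape as the "single tensor, multiple consumers across split" case:
  // ((input ; partition_input) ; layer_1) followed by (layer_2 || layer_3).
  SPNodePtr split = make_series_split(
      make_series_split(make_series_split(make_leaf("input"),
                                          make_leaf("partition_input")),
                        make_leaf("layer_1")),
      make_parallel_split(make_leaf("layer_2"), make_leaf("layer_3")));
  (void)split;
}
```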
@@ -100,13 +100,13 @@ TEST_SUITE(FF_TEST_SUITE) {
};

TensorShape tensor_shape = TensorShape{
TensorDims{
FFOrdered<nonnegative_int>{
10_n,
8_n,
TensorDims{
FFOrdered<nonnegative_int>{
10_n,
8_n,
},
},
},
DataType::FLOAT,
DataType::FLOAT,
};

UnmappedOpCostEstimateKey k1 = UnmappedOpCostEstimateKey{