#include <iostream>
#include <ecst.hpp>
// Define some components.
namespace c
{
// Components are simple classes, usually POD structs. There is no need
// for components to derive from a "base component" class or to satisfy
// a particular interface.
// NOTE(review): `vec2f` is not defined in this file — presumably a
// 2D float vector type provided by the surrounding example framework.
// 2D position of a particle, in world units.
struct position
{
vec2f _v;
};
// 2D velocity, added to `position` every step (scaled by delta time).
struct velocity
{
vec2f _v;
};
// 2D acceleration, added to `velocity` every step (scaled by delta time).
struct acceleration
{
vec2f _v;
};
}
// Define component tags.
namespace ct
{
    // Component tags are lightweight compile-time values used to refer
    // to component types in signatures and in `data.get(...)` calls.
    // (The previously-declared `sc` namespace alias was unused here and
    // has been removed.)
    constexpr auto position = ecst::tag::component::v<c::position>;
    constexpr auto velocity = ecst::tag::component::v<c::velocity>;
    constexpr auto acceleration = ecst::tag::component::v<c::acceleration>;
}
// Define some systems.
namespace s
{
// Systems are simple classes as well, that do not need to satisfy any
// particular interface. They can store data and have any method the
// user desires.
// This system accelerates the subscribed particles.
struct acceleration
{
// The `process` method is not hardcoded or specially recognized by
// ECST in any way. Using a lambda in the execution code, we can
// tell an ECST context to execute a particular method (also
// forwarding extra arguments to it).
// The `data` parameter is a proxy object generated by the system
// execution strategy that abstracts away the eventual underlying
// parallelism.
template <typename TData>
void process(ft dt, TData& data)
{
// Notice that the code below does not know anything about the
// multithreading strategy employed by the system: the same
// syntax works with any kind (or lack) of parallel execution.
data.for_entities([&](auto eid)
{
auto& v = data.get(ct::velocity, eid)._v;
const auto& a = data.get(ct::acceleration, eid)._v;
v += a * dt;
});
}
};
// This system moves the subscribed particles.
struct velocity
{
template <typename TData>
void process(ft dt, TData& data)
{
data.for_entities([&](auto eid)
{
auto& p = data.get(ct::position, eid)._v;
const auto& v = data.get(ct::velocity, eid)._v;
p += v * dt;
});
}
};
}
// Setup compile-time settings.
namespace ecst_setup
{
    // Builds and returns a "component signature list".
    constexpr auto make_csl()
    {
        namespace sc = ecst::signature::component;
        namespace slc = ecst::signature_list::component;

        // Store `c::acceleration`, `c::velocity` and `c::position` in
        // three separate contiguous buffers (SoA).
        return slc::make(                                   // .
            sc::make(ct::acceleration).contiguous_buffer(), // .
            sc::make(ct::velocity).contiguous_buffer(),     // .
            sc::make(ct::position).contiguous_buffer());

        // Components can be stored in multiple ways, and users
        // can define their complex storage types. Here's an example
        // of "AoS" storage:
        /*
            constexpr auto cs_aos_physics =
                sc::make(ct::acceleration, ct::velocity, ct::position)
                    .contiguous_buffer();
        */
    }

    // Builds and returns a "system signature list".
    constexpr auto make_ssl()
    {
        // Signature namespace aliases.
        namespace ss = ecst::signature::system;
        namespace sls = ecst::signature_list::system;

        // Inner parallelism aliases and definitions.
        namespace ips = ecst::inner_parallelism::strategy;
        namespace ipc = ecst::inner_parallelism::composer;

        // "Split processing evenly between cores."
        constexpr auto per_core_split = ips::split_evenly_fn::v_cores();

        // Build and return the "system signature list":
        return sls::make(
            // Acceleration system.
            // * Multithreaded.
            // * No dependencies.
            ss::make(st::acceleration)        // .
                .parallelism(per_core_split)  // .
                .read(ct::acceleration)       // .
                .write(ct::velocity),

            // Velocity system.
            // * Multithreaded.
            ss::make(st::velocity)                // .
                .parallelism(per_core_split)      // .
                .dependencies(st::acceleration)   // .
                .read(ct::velocity)               // .
                .write(ct::position));
    }
}
// Create a particle and return its unique ID.
// `proxy` is a context step proxy; `position` is the particle's initial
// world position.
template <typename TProxy>
auto mk_particle(TProxy& proxy, const vec2f& position)
{
    auto eid = proxy.create_entity();

    // Constant downward acceleration ("gravity").
    auto& ca = proxy.add_component(ct::acceleration, eid);
    ca._v.y = 1;

    // Random initial velocity.
    auto& cv = proxy.add_component(ct::velocity, eid);
    cv._v = rndvec2f(-3, 3);

    // Fixed: the `position` parameter was previously ignored and no
    // position component was ever added, even though the velocity system
    // reads/writes `c::position`. Add it and initialize it from the
    // parameter.
    auto& cp = proxy.add_component(ct::position, eid);
    cp._v = position;

    return eid;
}
int main()
{
    // Namespace aliases.
    using namespace ecst_setup;
    namespace cs = ecst::settings;
    namespace ss = ecst::scheduler;
    namespace sea = ecst::system_execution_adapter;

    // Define ECST context settings.
    constexpr auto s =
        ecst::settings::make()
            .allow_inner_parallelism()
            .fixed_entity_limit(ecst::sz_v<10000>)
            .component_signatures(make_csl())
            .system_signatures(make_ssl())
            .scheduler(cs::scheduler<ss::s_atomic_counter>);

    // Create an ECST context.
    auto ctx = ecst::context::make_uptr(s);

    // Initialize context with some entities.
    ctx->step([&](auto& proxy)
        {
            for(sz_t i = 0; i < 1000; ++i)
            {
                mk_particle(proxy, random_position());
            }
        });

    // "Game loop."
    while(true)
    {
        auto dt = delta_time();

        ctx->step([dt](auto& proxy)
            {
                // Start executing a chain of systems from
                // `st::acceleration`.
                // Fixed: the original matched a nonexistent
                // `st::position` system tag — only the `acceleration`
                // and `velocity` systems are defined (positions are
                // written by `s::velocity`).
                proxy.execute_systems_from(st::acceleration)(
                    // Match systems in the chain by tag...
                    sea::t(st::acceleration, st::velocity)
                        // ...and execute the same logic for every
                        // parallel subtask:
                        .for_subtasks([dt](auto& s, auto& data)
                            {
                                s.process(dt, data);
                            }));

                // Need more control? Here's an additional example:
                proxy.execute_systems_from(st::acceleration)(
                    sea::t(st::acceleration)
                        .for_subtasks([dt](auto& s, auto& data)
                            {
                                s.process(dt, data);
                            }),
                    sea::t(st::velocity)
                        .detailed_instance(
                            [&proxy, dt](auto& instance, auto& executor)
                            {
                                // Access system `instance` details:
                                std::cout << instance.subscribed().size()
                                          << "\n";
                                auto& s(instance.system());

                                do_something_before();
                                executor.for_subtasks([&s, dt](auto& data)
                                    {
                                        s.process(dt, data);
                                    });
                                do_something_after();
                            }));
            },
            // Refresh events can be caught and handled sequentially.
            // Fixed: both log statements below previously ended with `.`
            // instead of `;` (syntax errors).
            ecst::refresh_event::on_subscribe(st::acceleration,
                [](auto& system, auto eid)
                {
                    log() << "Entity #" << eid
                          << " subscribed to acceleration system.\n";
                }),
            ecst::refresh_event::on_reclaim([](auto eid)
                {
                    log() << "Entity #" << eid << " reclaimed.\n";
                }));
    }
}