v0.6 -> v0.7.0.dev4 templates #433

Open · wants to merge 2 commits into base: develop
3 changes: 3 additions & 0 deletions .editorconfig
@@ -10,5 +10,8 @@ insert_final_newline = true
charset = utf-8
end_of_line = lf

[*.{yaml,yml,yaml.jinja}]
indent_size = 2

[LICENSE]
insert_final_newline = false
6 changes: 5 additions & 1 deletion Snakefile
@@ -125,7 +125,7 @@ rule module_without_specific_data:
template = model_template_dir + "{template}",
output: "build/models/{resolution}/{template}"
wildcard_constraints:
template = "interest-rate.yaml"
template = "interest-rate.yaml|additional-math.yaml"
conda: "envs/shell.yaml"
shell: "cp {input.template} {output}"

@@ -165,6 +165,10 @@ rule model:
"techs/supply/nuclear.yaml",
]
),
math_files = expand(
"build/models/{{resolution}}/{file}",
file=["additional-math.yaml"]
),
heat_timeseries_data = (
"build/models/{resolution}/timeseries/conversion/heat-pump-cop.csv",
"build/models/{resolution}/timeseries/supply/historic-electrified-heat.csv",
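Note on the new `math_files` input above: snakemake's `expand` treats the doubled braces as an escaped wildcard, so `{resolution}` is kept verbatim and resolved later by the rule. A minimal check, using the path from this diff:

```python
from snakemake.io import expand

# Doubled braces escape the wildcard, so {resolution} survives expansion
paths = expand("build/models/{{resolution}}/{file}", file=["additional-math.yaml"])
print(paths)  # ['build/models/{resolution}/additional-math.yaml']
```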
31 changes: 16 additions & 15 deletions envs/test.yaml
@@ -1,18 +1,19 @@
name: test
channels:
- conda-forge
- gurobi
- conda-forge/label/calliope_dev
- conda-forge
- gurobi
dependencies:
- python=3.9
- ipdb=0.13.13
- numpy=1.23
- pandas=1.5
- xarray=2022.3
- gurobi=9.5.1
- pytest=7.3.1
- pytest-html=3.2.0
- calliope=0.6.10
- pyomo=6.4.1
- netCDF4=1.6.2
- hdf5=1.12.2
- libnetcdf=4.8.1
- python=3.12
- ipdb=0.13.13
- numpy=1.26.4
- pandas=2.1.4
- xarray=2024.2.0
- gurobi=11.0.3
- pytest=8.2.2
- pytest-html=3.2.0
- calliope=0.7.0dev4
- pyomo=6.7.1
- netCDF4=1.6.5
- hdf5=1.14.2
- libnetcdf=4.9.2
8 changes: 6 additions & 2 deletions lib/eurocalliopelib/template.py
@@ -33,10 +33,14 @@ def _update_kwargs(**kwargs):
kwargs["scaling_factors"]["specific_costs"] = (
kwargs["scaling_factors"]["monetary"] / kwargs["scaling_factors"]["power"]
)
for config_key in ["locations", "links"]: # we cannot allow keys with "." in them
for config_key in [
"locations",
"nodes",
"links",
]: # we cannot allow keys with "." in them
if config_key in kwargs:
kwargs[config_key] = kwargs[config_key].rename(
index=lambda x: x.replace(".", "-")
index=lambda x: x.replace(".", "_")
)

return kwargs
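A small illustration of the renaming above with made-up node ids, since this PR both adds `nodes` to the sanitised keys and switches the replacement character from `-` to `_` (keys cannot contain `.`, as the inline comment says):

```python
import pandas as pd

# Hypothetical node table keyed by dotted ids, as produced upstream
nodes = pd.DataFrame({"demand": [1.0, 2.0]}, index=["AUT.1", "DEU.2"])
nodes = nodes.rename(index=lambda x: x.replace(".", "_"))
print(nodes.index.tolist())  # ['AUT_1', 'DEU_2']
```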
8 changes: 3 additions & 5 deletions scripts/demand/load.py
@@ -57,10 +57,10 @@ def load(
axis=1,
)
assert math.isclose(
load_ts.sum().sum() * (-1) / scaling_factor,
load_ts.sum().sum() / scaling_factor,
national_load.reindex(columns=units.country_code.unique()).sum().sum(),
)
load_ts.tz_convert(None).to_csv(path_to_result)
load_ts.tz_convert(None).rename_axis(index="timesteps").to_csv(path_to_result)


def split_national_load(national_load, units):
@@ -86,18 +86,16 @@ def unit_time_series(
unit_industrial_ts = (
national_industrial_load.loc[:, country_code].copy()
* multiplier
* (-1)
* scaling_factor
)
multiplier = unit.fraction_of_national_residential_load
unit_residential_ts = (
national_residential_load.loc[:, country_code].copy()
* multiplier
* (-1)
* scaling_factor
)
unit_ts = unit_industrial_ts + unit_residential_ts
unit_ts.name = unit_name.replace(".", "-")
unit_ts.name = unit_name.replace(".", "_")
return unit_ts


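Two conventions change together in this script: the demand series is no longer negated (the `* (-1)` factors and the sign flip in the assertion are removed, presumably because Calliope 0.7 expects positive demand values), and the CSV index is now named `timesteps`. A toy sketch of the resulting output, with made-up numbers:

```python
import pandas as pd

# Made-up electricity load, kept positive (no sign flip) and written with a
# "timesteps" index so the CSV header matches the new convention
load_ts = pd.DataFrame(
    {"AUT": [5.0, 6.0]},
    index=pd.date_range("2016-01-01", periods=2, freq="h", tz="UTC"),
)
load_ts.tz_convert(None).rename_axis(index="timesteps").to_csv("load.csv")
# load.csv starts with: timesteps,AUT
```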
6 changes: 1 addition & 5 deletions scripts/heat/heat_demand_final_timeseries.py
@@ -119,11 +119,7 @@ def electrify_heat_demand_profiles(
scaled_profiles, cop, snakemake.params.electrification_shares
)

# Demands are stored as negative values for Calliope to ingest
if snakemake.wildcards.tech_group == "demand":
scaled_profiles *= -1

final_df = (
scaled_profiles.sum("end_use").astype("float32").to_series().unstack("id")
)
final_df.to_csv(snakemake.output[0])
final_df.rename_axis(index="timesteps").to_csv(snakemake.output[0])
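For orientation, the unchanged part of this pipeline reduces the profile array over `end_use` and pivots it to one column per location id; the diff only drops the negation and names the index. A toy example with invented coordinates:

```python
import numpy as np
import xarray as xr

# Hypothetical scaled profiles: time x id x end_use
scaled_profiles = xr.DataArray(
    np.full((2, 2, 2), 1.0),
    dims=("time", "id", "end_use"),
    coords={
        "time": ["t0", "t1"],
        "id": ["AUT_1", "DEU_1"],
        "end_use": ["space_heat", "hot_water"],
    },
)
final_df = scaled_profiles.sum("end_use").astype("float32").to_series().unstack("id")
final_df.rename_axis(index="timesteps").to_csv("heat-demand.csv")
# heat-demand.csv: one row per timestep, one column per id, values 2.0
```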
2 changes: 1 addition & 1 deletion scripts/heat/heat_pump_final_timeseries.py
@@ -54,4 +54,4 @@ def _end_use_weighted_ave(
timeseries_data_group_end_use = group_end_uses(timeseries_data, annual_demand_ds)

final_df = timeseries_data_group_end_use.astype("float32").to_series().unstack("id")
final_df.to_csv(snakemake.output[0])
final_df.rename_axis(index="timesteps").to_csv(snakemake.output[0])
2 changes: 1 addition & 1 deletion scripts/heat/population_per_gridbox.py
@@ -31,7 +31,7 @@ def population_on_weather_grid(
# Locations are shapefiles at the resolution of interest
# (e.g. countries for resolution `national`)
locations = gpd.read_file(path_to_locations)
locations["id"] = locations["id"].str.replace(".", "-", regex=False)
locations["id"] = locations["id"].str.replace(".", "_", regex=False)

gridbox_points = gpd.GeoDataFrame(
geometry=gpd.points_from_xy(
2 changes: 1 addition & 1 deletion scripts/heat/rescale.py
@@ -37,7 +37,7 @@ def national_to_regional_resolution(
},
)
.mul(df_population_share)
.rename(columns=lambda col_name: col_name.replace(".", "-"))
.rename(columns=lambda col_name: col_name.replace(".", "_"))
)

pd.testing.assert_series_equal(regional_df.sum(axis=1), annual_demand.sum(axis=1))
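Same separator switch here; for context, the surrounding helper spreads a national series over its regions by population share and then asserts that the regional columns still sum to the national total. A simplified toy version (country, regions, and shares invented):

```python
import pandas as pd

national = pd.DataFrame({"DEU": [10.0, 20.0]})  # made-up national demand
population_share = pd.Series({"DEU.1": 0.25, "DEU.2": 0.75})  # made-up shares

regional = (
    pd.DataFrame({region: national["DEU"] for region in population_share.index})
    .mul(population_share, axis=1)
    .rename(columns=lambda col_name: col_name.replace(".", "_"))
)
# Disaggregation must conserve the national total
pd.testing.assert_series_equal(regional.sum(axis=1), national.sum(axis=1))
```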
3 changes: 2 additions & 1 deletion scripts/hydro/capacityfactors_hydro.py
@@ -51,7 +51,8 @@ def time_series(plants, locations, capacities):
.pivot(index="time", columns="index_right", values="inflow_MWh")
.reindex(columns=locations.index, fill_value=0)
.div(capacities)
.rename(columns=lambda col: col.replace(".", "-"))
.rename(columns=lambda col: col.replace(".", "_"))
.rename_axis(index="timesteps")
)


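For orientation, the chain above turns per-plant inflow into per-location capacity factors: pivot onto locations, zero-fill locations without plants, divide by installed capacity, then apply this PR's id and index-name conventions. A toy run with invented numbers:

```python
import pandas as pd

inflow = pd.DataFrame(
    {"time": ["t0", "t0"], "index_right": ["AUT.1", "CHE.1"], "inflow_MWh": [50.0, 0.0]}
)
capacities = pd.Series({"AUT.1": 100.0, "CHE.1": 10.0, "DEU.1": 5.0})

cf = (
    inflow.pivot(index="time", columns="index_right", values="inflow_MWh")
    .reindex(columns=capacities.index, fill_value=0)
    .div(capacities)
    .rename(columns=lambda col: col.replace(".", "_"))
    .rename_axis(index="timesteps")
)
print(cf)  # AUT_1: 0.5, CHE_1: 0.0, DEU_1: 0.0
```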
2 changes: 1 addition & 1 deletion scripts/shapes/template_locations.py
@@ -19,7 +19,7 @@ def construct_locations(
).loc[:, ["name", "centroid"]]

locations = locations.assign(
id=locations.index.str.replace(".", "-", regex=False)
id=locations.index.str.replace(".", "_", regex=False)
).set_index("id")

parametrise_template(
12 changes: 6 additions & 6 deletions scripts/summarise_potentials.py
@@ -41,7 +41,7 @@ def summarise_potentials(
model = calliope.Model(path_to_model)

list_of_techs = model.inputs.techs.values
list_of_locs = model.inputs.locs.values
list_of_locs = model.inputs.nodes.values
list_of_potentials = list(set(considered_potentials).intersection(model.inputs))

summary = np.empty((len(list_of_techs), len(list_of_potentials), len(list_of_locs)))
@@ -51,25 +51,25 @@

summary = xr.DataArray(
summary,
dims=("techs", "potentials", "locs"),
dims=("techs", "potentials", "nodes"),
coords={
"techs": list_of_techs,
"potentials": list_of_potentials,
"locs": list_of_locs,
"nodes": list_of_locs,
"unit": (["potentials"], units),
},
)

for potential in list_of_potentials:
aux = model.get_formatted_array(potential)
aux = model._model_data[potential]
summary.coords["unit"].loc[dict(potentials=potential)] = considered_potentials[
potential
]["unit"]
for tech in list_of_techs:
for loc in list_of_locs:
try:
summary.loc[dict(techs=tech, locs=loc, potentials=potential)] = (
aux.loc[dict(techs=tech, locs=loc)]
summary.loc[dict(techs=tech, nodes=loc, potentials=potential)] = (
aux.loc[dict(techs=tech, nodes=loc)]
/ considered_potentials[potential]["sf"]
)
except KeyError:
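The substantive changes in this script track the Calliope 0.7 data model: the spatial dimension is `nodes` instead of `locs`, and `get_formatted_array` no longer exists, so the labelled arrays are read directly (here via the private `_model_data`, as in the diff). A sketch of the access pattern, with a hypothetical model path and parameter name:

```python
import calliope

model = calliope.Model("build/models/national/model.yaml")  # hypothetical path

techs = model.inputs.techs.values
nodes = model.inputs.nodes.values  # was model.inputs.locs.values with v0.6

# "flow_cap_max" is only an example parameter; availability depends on the model
flow_cap_max = model._model_data["flow_cap_max"]
value = flow_cap_max.loc[dict(techs=techs[0], nodes=nodes[0])]
```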
12 changes: 11 additions & 1 deletion scripts/template_model.py
@@ -5,13 +5,22 @@
from eurocalliopelib.template import parametrise_template


def construct_model(path_to_template, path_to_output, modules, resolution, year):
def construct_model(
path_to_template,
path_to_output,
modules,
resolution,
math_files,
year,
):
input_files = sorted([update_path(file, resolution) for file in modules])
math_files = [update_path(file, resolution) for file in math_files]

return parametrise_template(
path_to_template,
path_to_output,
input_files=input_files,
math_files=math_files,
resolution=resolution,
year=year,
)
@@ -27,6 +36,7 @@ def update_path(path_string, resolution):
construct_model(
path_to_template=snakemake.input.template,
modules=snakemake.input.modules,
math_files=snakemake.input.math_files,
resolution=snakemake.wildcards.resolution,
year=snakemake.params.year,
path_to_output=snakemake.output[0],
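For reference, `math_files` goes through the same path rewriting as `modules` and is then handed to `parametrise_template`, which presumably exposes it to the Jinja template. A purely illustrative fragment of how such a list could be consumed (the real model template is not part of this diff, and the YAML key is invented):

```python
import jinja2

fragment = jinja2.Template(
    "math:\n{% for path in math_files %}  - {{ path }}\n{% endfor %}"
)
print(fragment.render(math_files=["./additional-math.yaml"]))
# math:
#   - ./additional-math.yaml
```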
4 changes: 2 additions & 2 deletions scripts/transport/aggregate_timeseries.py
@@ -53,7 +53,7 @@ def create_regional_timeseries(
},
)
.mul(df_population_share)
.rename(columns=lambda col_name: col_name.replace(".", "-"))
.rename(columns=lambda col_name: col_name.replace(".", "_"))
)

pd.testing.assert_series_equal(df_regional.sum(axis=1), df_national.sum(axis=1))
@@ -79,4 +79,4 @@ def create_regional_timeseries(
else:
raise ValueError("Input resolution not recognised.")

ts.to_csv(path_to_output)
ts.rename_axis(index="timesteps").to_csv(path_to_output)
2 changes: 1 addition & 1 deletion scripts/transport/road_transport_controlled_charging.py
@@ -23,7 +23,7 @@ def scale_to_regional_resolution(df, region_country_mapping, populations):
},
)
.mul(df_population_share)
.rename(columns=lambda col_name: col_name.replace(".", "-"))
.rename(columns=lambda col_name: col_name.replace(".", "_"))
)
pd.testing.assert_series_equal(regional_df.sum(axis=1), df.sum(axis=1))
return regional_df
4 changes: 2 additions & 2 deletions scripts/transport/road_transport_controlled_constraints.py
@@ -13,7 +13,7 @@ def scale_to_resolution_and_create_file(
df = scale_national_to_regional(df, region_country_mapping, populations)
else:
raise ValueError(f"Resolution {resolution} is not supported")
df.tz_localize(None).rename_axis("utc-timestamp").to_csv(output_path)
df.tz_localize(None).rename_axis("timesteps").to_csv(output_path)


def scale_national_to_regional(df, region_country_mapping, populations):
@@ -33,7 +33,7 @@ def scale_national_to_regional(df, region_country_mapping, populations):
},
)
.mul(df_population_share)
.rename(columns=lambda col_name: col_name.replace(".", "-"))
.rename(columns=lambda col_name: col_name.replace(".", "_"))
)
pd.testing.assert_series_equal(regional_df.sum(axis=1), df.sum(axis=1))
return regional_df
3 changes: 0 additions & 3 deletions scripts/transport/road_transport_timeseries.py
@@ -54,9 +54,6 @@ def create_road_transport_demand_timeseries(
df_timeseries = (
df_timeseries.mul(conversion_factor)
.mul(power_scaling_factor)
.mul(
1 if historic else -1
) # historic demand is actually a supply to avoid double counting
.loc[:, country_codes]
.tz_localize(None)
.rename_axis("utc-timestamp")
6 changes: 4 additions & 2 deletions scripts/wind-and-solar/capacityfactors.py
@@ -23,7 +23,7 @@ def capacityfactors(
locations = (
gpd.read_file(path_to_locations).set_index("id").to_crs(EPSG3035).geometry
)
locations.index = locations.index.map(lambda x: x.replace(".", "-"))
locations.index = locations.index.map(lambda x: x.replace(".", "_"))
ts = xr.open_dataset(path_to_timeseries)
ts = ts.sel(time=slice(first_year, final_year))
# xarray will silently miss the fact that data doesn't exist with slice
@@ -56,7 +56,9 @@ def capacityfactors(
spatiotemporal=ts,
gridcell_overlap_threshold=gridcell_overlap_threshold,
)
capacityfactors.where(capacityfactors >= cf_threshold, 0).to_csv(path_to_result)
capacityfactors.where(capacityfactors >= cf_threshold, 0).rename_axis(
index="timesteps"
).to_csv(path_to_result)


if __name__ == "__main__":
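One detail worth noting on the threshold line that this diff only reformats: `where` keeps values for which the condition holds and replaces the rest with 0, so capacity factors below `cf_threshold` are zeroed before writing. Toy check with an invented threshold:

```python
import pandas as pd

cf = pd.DataFrame({"AUT_1": [0.005, 0.35]}, index=["t0", "t1"])
cf_threshold = 0.01  # illustrative value; the real one comes from configuration
clipped = cf.where(cf >= cf_threshold, 0).rename_axis(index="timesteps")
print(clipped)  # t0 -> 0.0 (below threshold), t1 -> 0.35
```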
4 changes: 3 additions & 1 deletion scripts/wind-and-solar/capacityfactors_offshore.py
@@ -54,7 +54,9 @@ def capacityfactors(
capacityfactors = _allocate_to_onshore_locations(
capacityfactors_per_eez, shared_coast
)
capacityfactors.where(capacityfactors >= cf_threshold, 0).to_csv(path_to_result)
capacityfactors.where(capacityfactors >= cf_threshold, 0).rename_axis(
index="timesteps"
).to_csv(path_to_result)


def _allocate_to_onshore_locations(capacityfactors_per_eez, shared_coast):
2 changes: 1 addition & 1 deletion scripts/wind-and-solar/shared_coast.py
@@ -90,7 +90,7 @@ def allocate_eezs(
# Clean up id and MRGID data
share.index = share.index.set_levels(
levels=(
share.index.levels[0].map(lambda x: x.replace(".", "-")),
share.index.levels[0].map(lambda x: x.replace(".", "_")),
share.index.levels[1].astype(int),
),
level=[0, 1],
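The id cleanup in this script sits on a MultiIndex of (location id, EEZ MRGID), hence `set_levels` instead of a plain rename; a toy version with invented values:

```python
import pandas as pd

share = pd.Series(
    [0.7, 0.3],
    index=pd.MultiIndex.from_tuples([("GBR.1", 5696.0), ("IRL.1", 5681.0)]),
)
share.index = share.index.set_levels(
    levels=(
        share.index.levels[0].map(lambda x: x.replace(".", "_")),
        share.index.levels[1].astype(int),
    ),
    level=[0, 1],
)
print(share.index.tolist())  # [('GBR_1', 5696), ('IRL_1', 5681)]
```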
25 changes: 13 additions & 12 deletions templates/environment.yaml
@@ -1,15 +1,16 @@
name: euro-calliope
channels:
- conda-forge
- gurobi
- conda-forge/label/calliope_dev
- conda-forge
- gurobi
dependencies:
- python=3.9
- numpy=1.23
- pandas=1.5
- xarray=2022.3
- netCDF4=1.6.2
- hdf5=1.12.2
- libnetcdf=4.8.1
- gurobi=9.5.1
- calliope=0.6.10
- pyomo=6.4.1
- python=3.12
- numpy=1.26.4
- pandas=2.1.4
- xarray=2024.2.0
- netCDF4=1.6.5
- hdf5=1.14.2
- libnetcdf=4.9.2
- gurobi=11.0.3
- calliope=0.7.0dev4
- pyomo=6.7.1