Skip to content

Make summary

Create summary CSV files for all scenario runs including costs, capacities, capacity factors, curtailment, energy balances, prices and other metrics.

assign_carriers(n)

Assign AC where missing. Args: n (pypsa.Network): the network object to fix.

Source code in workflow/scripts/make_summary.py
29
30
31
32
33
34
def assign_carriers(n: pypsa.Network):
    """Ensure every line has a carrier, defaulting to AC.

    Args:
        n (pypsa.Network): the network whose lines are patched in place
    """
    if "carrier" not in n.lines.columns:
        n.lines["carrier"] = "AC"

calculate_capacities(n, label, capacities, adjust_link_capacities=None)

Calculate the optimal capacities by carrier and bus carrier

For links that connect to AC buses (bus1=AC), the capacity can be multiplied by efficiency to report the actual capacity available at the AC side rather than the input side. This ensures consistent capacity reporting across the network.

Parameters:

Name Type Description Default
n Network

the network object

required
label str

the label used by make summaries

required
capacities DataFrame

the dataframe to fill/update

required
adjust_link_capacities bool

Whether to adjust link capacities by efficiency. If None, reads from config. Defaults to None.

None

Returns:

Type Description
DataFrame

pd.DataFrame: updated capacities

Source code in workflow/scripts/make_summary.py
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
def calculate_capacities(
    n: pypsa.Network, label: str, capacities: pd.DataFrame, adjust_link_capacities=None
) -> pd.DataFrame:
    """Calculate the optimal capacities by carrier and bus carrier

    For links that connect to AC buses (bus1=AC), the capacity can be multiplied by efficiency
    to report the actual capacity available at the AC side rather than the input side.
    This ensures consistent capacity reporting across the network.

    Args:
        n (pypsa.Network): the network object
        label (str): the label used by make summaries
        capacities (pd.DataFrame): the dataframe to fill/update
        adjust_link_capacities (bool, optional): Whether to adjust link capacities by efficiency.
            NOTE(review): earlier docs claimed a None value falls back to the config, but no
            config lookup happens in this function - None is simply treated as falsy (no
            adjustment). Defaults to None.

    Returns:
        pd.DataFrame: updated capacities
    """

    # Temporarily save original link capacities (restored below so the caller's network
    # is left unmodified)
    original_p_nom_opt = n.links.p_nom_opt.copy()

    # Drop reversed links & report AC capacities for links from X to AC
    if adjust_link_capacities:

        # For links where bus1 is AC, multiply capacity by efficiency coefficient to get AC side capacity
        ac_links = n.links[n.links.bus1.map(n.buses.carrier) == "AC"].index
        n.links.loc[ac_links, "p_nom_opt"] *= n.links.loc[ac_links, "efficiency"]

        # ignore lossy link dummies
        # NOTE(review): "Link" in the query string refers to the index name of n.links;
        # assumes the lossy-link convention of zero-cost companions named "...reversed"
        # - confirm upstream naming
        pseudo_links = n.links.query("Link.str.contains('reversed') & capital_cost ==0 ").index
        n.links.loc[pseudo_links, "p_nom_opt"] = 0
    # Calculate optimal capacity using default grouper
    caps = n.statistics.optimal_capacity(
        groupby=pypsa.statistics.get_carrier_and_bus_carrier, nice_names=False
    )

    # Restore original link capacities to avoid modifying the network object
    n.links.p_nom_opt = original_p_nom_opt

    # drop the load-shedding modelling artefact and relabel AC line capacity
    if "load shedding" in caps.index.get_level_values(1):
        caps.drop("load shedding", level=1, inplace=True)
    caps.rename(index={"AC": "Transmission Lines"}, inplace=True, level=1)

    # track links that feed into AC
    mask = (n.links.bus1.map(n.buses.carrier) == "AC") & (n.links.carrier != "stations")
    to_ac = n.links.loc[mask, "carrier"].unique()

    # tag AC-feeding carriers with an explicit end_carrier level ("-" everywhere else)
    caps_df = caps.reset_index()
    ac_mask = caps_df["carrier"].isin(to_ac)
    caps_df.loc[ac_mask, "end_carrier"] = "AC"
    caps = caps_df.fillna("-").set_index(["component", "carrier", "bus_carrier", "end_carrier"])[0]

    capacities[label] = caps.sort_index(level=0)
    return capacities

calculate_cfs(n, label, cfs)

Calculate the capacity factors by carrier

Parameters:

Name Type Description Default
n Network

the network object

required
label str

the label used by make summaries

required
cfs DataFrame

the dataframe to fill/update

required

Returns: pd.DataFrame: updated cfs

Source code in workflow/scripts/make_summary.py
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
def calculate_cfs(n: pypsa.Network, label: str, cfs: pd.DataFrame) -> pd.DataFrame:
    """Calculate the capacity factors by carrier

    Args:
        n (pypsa.Network): the network object
        label (str): the label used by make summaries
        cfs (pd.DataFrame): the dataframe to fill/update
    Returns:
        pd.DataFrame: updated cfs
    """
    # branch components plus the controllable one-ports without Load/StorageUnit
    selection = n.branch_components | n.controllable_one_port_components ^ {"Load", "StorageUnit"}
    for comp in n.iterate_components(selection):
        nom_attr = opt_name.get(comp.name, "p") + "_nom_opt"
        caps_by_carrier = comp.df[nom_attr].groupby(comp.df.carrier).sum()

        # mean absolute utilisation: branch flows use p0, stores their energy level
        if comp.name in ("Link", "Line", "Transformer"):
            mean_use = comp.pnl.p0.abs().mean()
        elif comp.name == "Store":
            mean_use = comp.pnl.e.abs().mean()
        else:
            mean_use = comp.pnl.p.abs().mean()

        cf_by_carrier = mean_use.groupby(comp.df.carrier).sum() / caps_by_carrier
        cf_by_carrier = pd.concat([cf_by_carrier], keys=[comp.list_name])
        cfs = cfs.reindex(cf_by_carrier.index.union(cfs.index))
        cfs.loc[cf_by_carrier.index, label] = cf_by_carrier

    return cfs

calculate_co2_balance(n, label, co2_balance, withdrawal_stores=['CO2 capture'])

calc the co2 balance [DOES NOT INCLUDE EMISSION GENERATING LINKS] Args: n (pypsa.Network): the network object withdrawal_stores (list, optional): names of stores. Defaults to ["CO2 capture"]. label (str): the label for the column co2_balance (pd.DataFrame): the df to update

Returns:

Type Description
DataFrame

pd.DataFrame: updated co2_balance (bad style)

Source code in workflow/scripts/make_summary.py
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
def calculate_co2_balance(
    n: pypsa.Network,
    label: str,
    co2_balance: pd.DataFrame,
    withdrawal_stores=None,
) -> pd.DataFrame:
    """Calculate the CO2 balance [DOES NOT INCLUDE EMISSION GENERATING LINKS].

    Args:
        n (pypsa.Network): the network object
        label (str): the label for the column
        co2_balance (pd.DataFrame): the df to update
        withdrawal_stores (list, optional): carrier names of stores that withdraw
            CO2. Defaults to ["CO2 capture"].

    Returns:
        pd.DataFrame: updated co2_balance (bad style)
    """
    # avoid the shared-mutable-default pitfall: build the default per call
    if withdrawal_stores is None:
        withdrawal_stores = ["CO2 capture"]

    # year (assumes one planning year intended)
    year = int(np.round(n.snapshots.year.values.mean(), 0))

    # emissions from generators (t/h): dispatch / efficiency * specific emissions
    # (from fneumann course)
    emissions = (
        n.generators_t.p
        / n.generators.efficiency
        * n.generators.carrier.map(n.carriers.co2_emissions)
    )
    emissions_carrier = (
        (n.snapshot_weightings.generators @ emissions).groupby(n.generators.carrier).sum()
    )

    # format and drop zero / negative values
    emissions_carrier = emissions_carrier.where(emissions_carrier > 0).dropna()
    emissions_carrier.rename(year, inplace=True)
    emissions_carrier = emissions_carrier.to_frame()

    # CO2 withdrawal: final energy level of the capture stores, sign flipped
    stores = n.stores_t.e.T.groupby(n.stores.carrier).sum()
    co2_stores = stores.index.intersection(withdrawal_stores)
    co2_withdrawal = stores.iloc[:, -1].loc[co2_stores] * -1
    co2_withdrawal.rename(year, inplace=True)
    co2_withdrawal = co2_withdrawal.to_frame()
    year_balance = pd.concat([emissions_carrier, co2_withdrawal])

    # combine with previous results
    co2_balance = co2_balance.reindex(year_balance.index.union(co2_balance.index))
    co2_balance.loc[year_balance.index, label] = year_balance[year]

    return co2_balance

calculate_costs(n, label, costs)

Calculate the costs by carrier Args: n (pypsa.Network): the network object label (str): the label used by make summaries costs (pd.DataFrame): the dataframe to fill/update Returns: pd.DataFrame: updated costs

Source code in workflow/scripts/make_summary.py
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
def calculate_costs(n: pypsa.Network, label: str, costs: pd.DataFrame) -> pd.DataFrame:
    """Calculate the costs by carrier
    Args:
        n (pypsa.Network): the network object
        label (str): the label used by make summaries
        costs (pd.DataFrame): the dataframe to fill/update
    Returns:
        pd.DataFrame: updated costs
    """

    # NOTE: ^ binds tighter than |, so this iterates the branch components plus
    # the controllable one-port components without Load
    for c in n.iterate_components(
        n.branch_components | n.controllable_one_port_components ^ {"Load"}
    ):
        # capital cost = annualised cost coefficient * optimised capacity
        capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
        capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum()

        # resulting index levels: (component list name, "capital", carrier)
        capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"])
        capital_costs_grouped = pd.concat([capital_costs_grouped], keys=[c.list_name])

        costs = costs.reindex(capital_costs_grouped.index.union(costs.index))

        costs.loc[capital_costs_grouped.index, label] = capital_costs_grouped

        # energy basis for marginal costs, weighted by snapshot duration
        if c.name == "Link":
            p = c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum()
        elif c.name == "Line":
            # lines carry no marginal cost here
            continue
        elif c.name == "StorageUnit":
            # only dispatch (p > 0) incurs marginal cost, not charging
            p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
            p_all[p_all < 0.0] = 0.0
            p = p_all.sum()
        else:
            p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum()

        # correct sequestration cost
        # NOTE(review): this permanently rewrites marginal_cost on the network's
        # stores (-100 or lower -> -20); confirm callers do not rely on the original
        if c.name == "Store":
            items = c.df.index[(c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0)]
            c.df.loc[items, "marginal_cost"] = -20.0

        marginal_costs = p * c.df.marginal_cost

        marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum()

        marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=["marginal"])
        marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=[c.list_name])

        costs = costs.reindex(marginal_costs_grouped.index.union(costs.index))

        costs.loc[marginal_costs_grouped.index, label] = marginal_costs_grouped

    # TODO remove/see if needed, and if yes soft-code
    # add back in all hydro
    # costs.loc[("storage_units", "capital", "hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro", "p_nom"].sum()
    # costs.loc[("storage_units", "capital", "PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS", "p_nom"].sum()
    # costs.loc[("generators", "capital", "ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror", "p_nom"].sum()

    return costs

calculate_curtailment(n, label, curtailment)

Calculate curtailed energy by carrier

Parameters:

Name Type Description Default
n Network

the network object

required
label str

the label used by make summaries

required
curtailment DataFrame

the dataframe to fill/update

required

Returns: pd.DataFrame: updated curtailment

Source code in workflow/scripts/make_summary.py
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
def calculate_curtailment(n: pypsa.Network, label: str, curtailment: pd.DataFrame) -> pd.DataFrame:
    """Calculate curtailed energy by carrier (in percent of available energy).

    Args:
        n (pypsa.Network): the network object
        label (str): the label used by make summaries
        curtailment (pd.DataFrame): the dataframe to fill/update
    Returns:
        pd.DataFrame: updated curtailment
    """
    # theoretically available generation per carrier
    available = (
        n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
        .sum()
        .groupby(n.generators.carrier)
        .sum()
    )
    # actually dispatched generation per carrier
    dispatched = n.generators_t.p.sum().groupby(n.generators.carrier).sum()

    curtailed_share = ((available - dispatched).clip(0) / available).fillna(0)
    curtailment[label] = (curtailed_share * 100).round(3)

    return curtailment

calculate_expanded_capacities(n, label, capacities)

calculate the capacities by carrier

Parameters:

Name Type Description Default
n Network

the network object

required
label str

the label used by make summaries

required
capacities DataFrame

the dataframe to fill

required

Returns:

Type Description
DataFrame

pd.Dataframe: updated capacities (bad style)

Source code in workflow/scripts/make_summary.py
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
def calculate_expanded_capacities(
    n: pypsa.Network, label: str, capacities: pd.DataFrame
) -> pd.DataFrame:
    """Calculate the newly built (expanded) capacities by carrier.

    Args:
        n (pypsa.Network): the network object
        label (str): the label used by make summaries
        capacities (pd.DataFrame): the dataframe to fill

    Returns:
        pd.Dataframe: updated capacities (bad style)
    """
    expanded = n.statistics.expanded_capacity(
        groupby=pypsa.statistics.get_carrier_and_bus_carrier, nice_names=False
    )

    # load shedding is a modelling artefact, not a real asset
    if "load shedding" in expanded.index.get_level_values(1):
        expanded.drop("load shedding", level=1, inplace=True)

    expanded.rename(index={"AC": "Transmission Lines"}, inplace=True, level=1)

    # carriers of links that deliver into AC buses (the "stations" carrier excluded)
    feeds_ac = (n.links.bus1.map(n.buses.carrier) == "AC") & (n.links.carrier != "stations")
    ac_carriers = n.links.loc[feeds_ac, "carrier"].unique()

    # tag AC-feeding carriers with an explicit end_carrier level ("-" everywhere else)
    flat = expanded.reset_index()
    flat.loc[flat["carrier"].isin(ac_carriers), "end_carrier"] = "AC"
    expanded = flat.fillna("-").set_index(["component", "carrier", "bus_carrier", "end_carrier"])[0]

    capacities[label] = expanded.sort_index(level=0)
    return capacities

calculate_market_values(n, label, market_values)

Calculate the market value of the generators and links Args: n (pypsa.Network): the network object label (str): the label representing the pathway market_values (pd.DataFrame): the dataframe to write to (not needed, refactor) Returns: pd.DataFrame: updated market_values

Source code in workflow/scripts/make_summary.py
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
def calculate_market_values(
    n: pypsa.Network, label: str, market_values: pd.DataFrame
) -> pd.DataFrame:
    """Calculate the market value of the generators and links.

    Market value = total revenue / total dispatch, evaluated at the AC buses.
    Warning: storage units are not included.

    Args:
        n (pypsa.Network): the network object
        label (str): the label representing the pathway
        market_values (pd.DataFrame): the dataframe to write to (not needed, refactor)
    Returns:
        pd.DataFrame: updated market_values
    """
    carrier = "AC"

    buses = n.buses.index[n.buses.carrier == carrier]

    def _dispatch_by_bus(p: pd.DataFrame, bus_map: pd.Series) -> pd.DataFrame:
        # Aggregate time-series columns by their bus.
        # DataFrame.groupby(..., axis=1) is deprecated (removed in pandas 3.0),
        # so group on the transpose instead - numerically identical.
        return p.T.groupby(bus_map).sum().T.reindex(columns=buses, fill_value=0.0)

    # === market value of generators ===
    generators = n.generators.index[n.buses.loc[n.generators.bus, "carrier"] == carrier]

    techs = n.generators.loc[generators, "carrier"].value_counts().index

    market_values = market_values.reindex(market_values.index.union(techs))

    for tech in techs:
        gens = generators[n.generators.loc[generators, "carrier"] == tech]

        dispatch = _dispatch_by_bus(n.generators_t.p[gens], n.generators.loc[gens, "bus"])

        revenue = dispatch * n.buses_t.marginal_price[buses]

        # NaN when the technology never dispatched (0/0)
        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()

    # === market value of links (both ends that touch AC buses) ===
    for i in ["0", "1"]:
        carrier_links = n.links[n.links["bus" + i].isin(buses)].index

        techs = n.links.loc[carrier_links, "carrier"].value_counts().index

        market_values = market_values.reindex(market_values.index.union(techs))

        for tech in techs:
            links = carrier_links[n.links.loc[carrier_links, "carrier"] == tech]

            dispatch = _dispatch_by_bus(n.links_t["p" + i][links], n.links.loc[links, "bus" + i])

            revenue = dispatch * n.buses_t.marginal_price[buses]

            market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()

    return market_values

calculate_metrics(n, label, metrics)

LEGACY calculate a set of metrics for lines and co2 Args: n (pypsa.Network): the network object label (str): the label to update the table row with metrics (pd.DataFrame): the dataframe to write to (not needed, refactor) Returns: pd.DataFrame: updated metrics

Source code in workflow/scripts/make_summary.py
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
def calculate_metrics(n: pypsa.Network, label: str, metrics: pd.DataFrame):
    """LEGACY calculate a set of metrics for lines and co2
    Args:
        n (pypsa.Network): the network object
        label (str): the label to update the table row with
        metrics (pd.DataFrame): the dataframe to write to (not needed, refactor)
    Returns:
        pd.DataFrame: updated metrics"""

    rows = pd.Index(
        [
            "line_volume",
            "line_volume_limit",
            "line_volume_AC",
            "line_volume_DC",
            "line_volume_shadow",
            "co2_shadow",
            "co2_budget",
        ]
    )
    metrics = metrics.reindex(rows.union(metrics.index))

    # transmission volume = length * optimised capacity, split by current type
    dc_volume = (n.links.length * n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
    metrics.at["line_volume_DC", label] = dc_volume
    metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
    metrics.at["line_volume", label] = metrics.loc[
        ["line_volume_AC", "line_volume_DC"], label
    ].sum()

    gc = n.global_constraints
    if "lv_limit" in gc.index:
        metrics.at["line_volume_limit", label] = gc.at["lv_limit", "constant"]
        metrics.at["line_volume_shadow", label] = gc.at["lv_limit", "mu"]

    if "co2_limit" in gc.index:
        metrics.at["co2_shadow", label] = gc.at["co2_limit", "mu"]
        metrics.at["co2_budget", label] = gc.at["co2_limit", "constant"]
    return metrics

calculate_nodal_capacities(n, label, nodal_capacities)

Calculate the capacities by carrier and node

Parameters:

Name Type Description Default
n Network

the network object

required
label str

the label used by make summaries

required
nodal_capacities DataFrame

the dataframe to fill/update

required

Returns: pd.DataFrame: updated nodal_capacities

Source code in workflow/scripts/make_summary.py
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
def calculate_nodal_capacities(
    n: pypsa.Network, label: str, nodal_capacities: pd.DataFrame
) -> pd.DataFrame:
    """Calculate the capacities by carrier and node

    Args:
        n (pypsa.Network): the network object
        label (str): the label used by make summaries
        nodal_capacities (pd.DataFrame): the dataframe to fill/update
    Returns:
        pd.DataFrame: updated nodal_capacities"""
    # Beware this also has extraneous locations for country (e.g. biomass) or continent-wide
    #  (e.g. fossil gas/oil) stuff

    # Filter out reversed links to avoid double-counting transmission capacity
    # Only include positive links since positive and reversed links have the same capacity
    # NOTE(review): assumes lossy-link pairs are named "...positive"/"...reversed";
    # any link without "positive" in its name is dropped here too - confirm the
    # naming convention holds for all networks this is run on
    positive_links_mask = n.links.index.str.contains("positive")

    # Create a temporary network with only positive links for capacity calculation
    # (n.copy() is expensive on large networks but keeps the caller's network untouched)
    n_temp = n.copy()
    reversed_links = n.links.index[~positive_links_mask]
    n_temp.links = n_temp.links.drop(reversed_links)

    nodal_cap = n_temp.statistics.optimal_capacity(groupby=pypsa.statistics.get_bus_and_carrier)
    nodal_capacities[label] = nodal_cap.sort_index(level=0)
    return nodal_capacities

calculate_nodal_cfs(n, label, nodal_cfs)

Calculate the capacity factors for each node and generator Args: n (pypsa.Network): the network object label (str): the label used by make summaries nodal_cfs (pd.DataFrame): the cap fac dataframe to fill/update Returns: pd.DataFrame: updated nodal_cfs

Source code in workflow/scripts/make_summary.py
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
def calculate_nodal_cfs(n: pypsa.Network, label: str, nodal_cfs: pd.DataFrame):
    """Calculate the capacity factors for each node and generator
    Args:
        n (pypsa.Network): the network object
        label (str): the label used by make summaries
        nodal_cfs (pd.DataFrame): the cap fac dataframe to fill/update
    Returns:
        pd.DataFrame: updated nodal_cfs
    """
    # Beware this also has extraneous locations for country (e.g. biomass)
    # or continent-wide (e.g. fossil gas/oil) stuff
    # Selection yields Link (branches minus Line/Transformer) plus Generator and
    # Store (controllable one-ports minus Load/StorageUnit)
    for c in n.iterate_components(
        (n.branch_components ^ {"Line", "Transformer"})
        | n.controllable_one_port_components ^ {"Load", "StorageUnit"}
    ):
        # optimised capacity per (location, carrier)
        capacities_c = c.df.groupby(["location", "carrier"])[
            opt_name.get(c.name, "p") + "_nom_opt"
        ].sum()

        # mean absolute utilisation per asset (energy level for stores)
        if c.name == "Link":
            p = c.pnl.p0.abs().mean()
        elif c.name == "Generator":
            p = c.pnl.p.abs().mean()
        elif c.name == "Store":
            p = c.pnl.e.abs().mean()
        else:
            # unreachable given the component selection above; hard exit if it is hit
            sys.exit()

        # NOTE(review): writes a "p" column onto the component dataframe, mutating
        # the network object as a side effect
        c.df["p"] = p
        p_c = c.df.groupby(["location", "carrier"])["p"].sum()
        cf_c = p_c / capacities_c

        index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in cf_c.index.to_list()])
        nodal_cfs = nodal_cfs.reindex(index.union(nodal_cfs.index))
        nodal_cfs.loc[index, label] = cf_c.values

    return nodal_cfs

calculate_nodal_costs(n, label, nodal_costs)

Calculate the costs by carrier and location Args: n (pypsa.Network): the network object label (str): the label used by make summaries nodal_costs (pd.DataFrame): the dataframe to fill/update Returns: pd.DataFrame: updated nodal_costs

Source code in workflow/scripts/make_summary.py
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
def calculate_nodal_costs(n: pypsa.Network, label: str, nodal_costs: pd.DataFrame):
    """Calculate the costs by carrier and location
    Args:
        n (pypsa.Network): the network object
        label (str): the label used by make summaries
        nodal_costs (pd.DataFrame): the dataframe to fill/update
    Returns:
        pd.DataFrame: updated nodal_costs
    """
    # Beware this also has extraneous locations for country (e.g. biomass)
    #  or continent-wide (e.g. fossil gas/oil) stuff
    # NOTE: ^ binds tighter than |, so this iterates the branch components plus
    # the controllable one-port components without Load
    for c in n.iterate_components(
        n.branch_components | n.controllable_one_port_components ^ {"Load"}
    ):
        # capital cost = annualised cost coefficient * optimised capacity
        # NOTE(review): writes "capital_costs" (and later "marginal_costs") columns
        # onto the component dataframe - mutates the network as a side effect
        c.df["capital_costs"] = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
        capital_costs = c.df.groupby(["location", "carrier"])["capital_costs"].sum()
        index = pd.MultiIndex.from_tuples(
            [(c.list_name, "capital") + t for t in capital_costs.index.to_list()]
        )
        nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index))
        nodal_costs.loc[index, label] = capital_costs.values

        # energy basis for marginal costs, weighted by snapshot duration
        if c.name == "Link":
            p = c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum()
        elif c.name == "Line":
            # lines carry no marginal cost here
            continue
        elif c.name == "StorageUnit":
            # only dispatch (p > 0) incurs marginal cost, not charging
            p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
            p_all[p_all < 0.0] = 0.0
            p = p_all.sum()
        else:
            p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum()

        # correct sequestration cost
        # NOTE(review): permanently rewrites marginal_cost on the network's stores
        # (-100 or lower -> -20); confirm callers do not rely on the original value
        if c.name == "Store":
            items = c.df.index[(c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0)]
            c.df.loc[items, "marginal_cost"] = -20.0

        c.df["marginal_costs"] = p * c.df.marginal_cost
        marginal_costs = c.df.groupby(["location", "carrier"])["marginal_costs"].sum()
        index = pd.MultiIndex.from_tuples(
            [(c.list_name, "marginal") + t for t in marginal_costs.index.to_list()]
        )
        nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index))
        nodal_costs.loc[index, label] = marginal_costs.values

    return nodal_costs

calculate_nodal_lcoe(n, label, nodal_lcoe)

Calculate LCOE by province and technology

Parameters:

Name Type Description Default
n Network

the network object

required
label str

the label used by make summaries

required
nodal_lcoe DataFrame

the dataframe to fill/update

required

Returns: pd.DataFrame: updated nodal_lcoe

Source code in workflow/scripts/make_summary.py
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
def calculate_nodal_lcoe(n: pypsa.Network, label: str, nodal_lcoe: pd.DataFrame):
    """Calculate LCOE by province and technology

    Args:
        n (pypsa.Network): the network object
        label (str): the label used by make summaries
        nodal_lcoe (pd.DataFrame): the dataframe to fill/update
    Returns:
        pd.DataFrame: updated nodal_lcoe
    """
    # delegate the heavy lifting to calc_lcoe, grouped per location and carrier
    lcoe_by_node = calc_lcoe(n, groupby=["location", "carrier"])

    nodal_lcoe[label] = lcoe_by_node["LCOE"]

    return nodal_lcoe

calculate_peak_dispatch(n, label, supply)

Calculate the MAX dispatch of each component at the buses aggregated by carrier.

Parameters:

Name Type Description Default
n Network

the network object

required
label str

the label representing the pathway

required
supply DataFrame

supply energy balance (empty df)

required

Returns:

Type Description
DataFrame

pd.DataFrame: updated supply DF

Source code in workflow/scripts/make_summary.py
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
def calculate_peak_dispatch(n: pypsa.Network, label: str, supply: pd.DataFrame) -> pd.DataFrame:
    """Calculate the MAX dispatch of each component at the buses aggregated by
    carrier.

    Args:
        n (pypsa.Network): the network object
        label (str): the label representing the pathway
        supply (pd.DataFrame): supply energy balance (empty df)

    Returns:
        pd.DataFrame: updated supply DF
    """
    peak = n.statistics.supply(
        groupby=pypsa.statistics.get_carrier_and_bus_carrier, aggregate_time="max"
    )
    # put bus_carrier first so rows group naturally by energy carrier
    supply[label] = peak.reorder_levels([2, 0, 1]).sort_index()

    return supply

calculate_supply_energy(n, label, supply_energy)

Calculate the total energy supply/consumption of each component at the buses aggregated by carrier.

Parameters:

Name Type Description Default
n Network

the network object

required
label str

the label representing the pathway

required
supply_energy DataFrame

supply energy balance (empty df)

required

Returns:

Type Description
DataFrame

pd.DataFrame: updated supply energy balance

Source code in workflow/scripts/make_summary.py
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
def calculate_supply_energy(
    n: pypsa.Network, label: str, supply_energy: pd.DataFrame
) -> pd.DataFrame:
    """Calculate the total energy supply/consumption of each component at the buses
    aggregated by carrier.

    Args:
        n (pypsa.Network): the network object
        label (str): the label representing the pathway
        supply_energy (pd.DataFrame): supply energy balance (empty df)

    Returns:
        pd.DataFrame: updated supply energy balance
    """
    balance = n.statistics.energy_balance(groupby=pypsa.statistics.get_carrier_and_bus_carrier)
    # fragile: relies on the level order returned by pypsa statistics
    balance = balance.reorder_levels([2, 0, 1]).sort_index()
    balance = balance.rename(index={"AC": "transmission losses"}, level=2)

    supply_energy[label] = balance

    return supply_energy

calculate_t_avgd_prices(n, label, prices)

Time averaged prices for nodes averaged over carrier (bit silly?)

Parameters:

Name Type Description Default
n Network

the network object

required
label str

the label representing the pathway (not needed, refactor)

required
prices DataFrame

the dataframe to write to (not needed, refactor)

required

Returns: pd.DataFrame: updated prices

Source code in workflow/scripts/make_summary.py
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
def calculate_t_avgd_prices(n: pypsa.Network, label: str, prices: pd.DataFrame):
    """Time averaged prices for nodes averaged over carrier (bit silly?)

    Args:
        n (pypsa.Network): the network object
        label (str): the label representing the pathway (not needed, refactor)
        prices (pd.DataFrame): the dataframe to write to (not needed, refactor)
    Returns:
        pd.DataFrame: updated prices
    """
    bus_carriers = n.buses.carrier.unique()
    prices = prices.reindex(prices.index.union(bus_carriers))

    # WARNING: this is time-averaged, see weighted_prices for load-weighted average
    mean_price_by_bus = n.buses_t.marginal_price.mean()
    prices[label] = mean_price_by_bus.groupby(n.buses.carrier).mean()

    return prices

calculate_weighted_prices(n, label, weighted_prices)

Demand-weighted prices for stores and loads. For stores, if withdrawal is zero, use supply instead. Args: n (pypsa.Network): the network object label (str): the label representing the pathway (not needed, refactor) weighted_prices (pd.DataFrame): the dataframe to write to (not needed, refactor)

Returns:

Type Description
DataFrame

pd.DataFrame: updated weighted_prices

Source code in workflow/scripts/make_summary.py
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
def calculate_weighted_prices(
    n: pypsa.Network, label: str, weighted_prices: pd.DataFrame
) -> pd.DataFrame:
    """Demand-weighted prices for stores and loads.
        For stores if withdrawal is zero, use supply instead.
    Args:
        n (pypsa.Network): the network object
        label (str): the label representing the pathway (not needed, refactor)
        weighted_prices (pd.DataFrame): the dataframe to write to (not needed, refactor)

    Returns:
        pd.DataFrame: updated weighted_prices
    """
    # bus carriers reported in the output table
    entries = pd.Index(["electricity", "heat", "H2", "CO2 capture", "gas", "biomass"])
    weighted_prices = weighted_prices.reindex(entries)

    # loads: average price = payments by loads / energy withdrawn by loads;
    # load revenue is negative (they pay), hence the sign flip
    load_rev = -1 * n.statistics.revenue(comps="Load", groupby=pypsa.statistics.get_bus_carrier)
    prices = load_rev / n.statistics.withdrawal(
        comps="Load", groupby=pypsa.statistics.get_bus_carrier
    )
    prices.rename(index={"AC": "electricity"}, inplace=True)

    # stores
    w = n.statistics.withdrawal(comps="Store")
    # biomass stores have no withdrawal for some reason
    if not w[w == 0].empty:
        w[w == 0] = n.statistics.supply(comps="Store")[w == 0]

    store_rev = n.statistics.revenue(comps="Store")
    # NOTE(review): magic threshold - stores earning less than 1/400 of total load
    # payments are dropped as negligible; confirm the cut-off is intentional
    mask = store_rev > load_rev.sum() / 400  # remove small
    wp_stores = store_rev[mask] / w[mask]
    weighted_prices[label] = pd.concat([prices, wp_stores.rename({"stations": "reservoir inflow"})])
    return weighted_prices

expand_from_wildcard(key, config)

return a list of values for the given key in the config file Args: key (str): the key to look for in the config file config (dict): the config file Returns: list: a list of values for the given key

Source code in workflow/scripts/make_summary.py
733
734
735
736
737
738
739
740
741
742
def expand_from_wildcard(key, config, wildcard_values=None) -> list:
    """Return the list of scenario values for the given wildcard key.

    Args:
        key (str): the key to look for in the config file
        config (dict): the config file
        wildcard_values (optional): object exposing the wildcard attributes.
            Defaults to the module-level snakemake ``wildcards`` global
            (kept for backwards compatibility; injectable for testing).
    Returns:
        list: all configured values when the wildcard is "all",
        otherwise the single wildcard value wrapped in a list
    """
    if wildcard_values is None:
        # fall back to the snakemake-provided global
        wildcard_values = wildcards
    w = getattr(wildcard_values, key)
    return config["scenario"][key] if w == "all" else [w]

make_summaries(networks_dict, opts=None)

Make summary tables for the given network Args: networks_dict (dict): a dictionary of (pathway, time):network_path used in the run opts (dict): options for each summary function Returns: dict: a dictionary of dataframes with the summary tables

Source code in workflow/scripts/make_summary.py
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
def make_summaries(
    networks_dict: dict[tuple, os.PathLike], opts: dict = None
) -> dict[str, pd.DataFrame]:
    """Build every summary table for each scenario network.

    Args:
        networks_dict (dict): a dictionary of (pathway, time):network_path used in the run
        opts (dict): options for each summary function
    Returns:
        dict: a dictionary of dataframes with the summary tables
    """
    if opts is None:
        opts = {}

    output_funcs = {
        "nodal_costs": calculate_nodal_costs,
        "nodal_capacities": calculate_nodal_capacities,
        "nodal_cfs": calculate_nodal_cfs,
        "nodal_lcoe": calculate_nodal_lcoe,
        "cfs": calculate_cfs,
        "costs": calculate_costs,
        "co2_balance": calculate_co2_balance,
        "capacities": calculate_capacities,
        "capacities_expanded": calculate_expanded_capacities,
        "curtailment_pc": calculate_curtailment,
        "peak_dispatch": calculate_peak_dispatch,
        # "energy": calculate_energy,
        "supply_energy": calculate_supply_energy,
        "time_averaged_prices": calculate_t_avgd_prices,
        "weighted_prices": calculate_weighted_prices,
        # "price_statistics": calculate_price_statistics,
        "market_values": calculate_market_values,
        "metrics": calculate_metrics,
    }

    columns = pd.MultiIndex.from_tuples(
        networks_dict.keys(), names=["co2_pathway", "planning_horizons"]
    )

    # TO DO: not needed, could be made by the functions
    dataframes_dict = {
        output: pd.DataFrame(columns=columns, dtype=float) for output in output_funcs
    }

    for label, path in networks_dict.items():
        logger.info(f"Make summary for scenario {label}, using (unknown)")

        network = pypsa.Network(path)
        assign_carriers(network)
        assign_locations(network)

        # each summary function updates (and returns) its own dataframe;
        # per-function keyword options come from opts when present
        for output, summarise in output_funcs.items():
            extra_kwargs = opts.get(output, {})
            dataframes_dict[output] = summarise(
                network, label, dataframes_dict[output], **extra_kwargs
            )

    return dataframes_dict