|
1 | 1 | """Prepare non-LDV data from the IKARUS model via :file:`GEAM_TRP_techinput.xlsx`."""
|
2 | 2 |
|
3 | 3 | import logging
|
4 |
| -from collections import defaultdict |
5 | 4 | from functools import lru_cache, partial
|
6 | 5 | from operator import le
|
7 |
| -from typing import TYPE_CHECKING, Dict |
| 6 | +from typing import Dict |
8 | 7 |
|
9 | 8 | import pandas as pd
|
10 | 9 | import xarray as xr
|
11 | 10 | from genno import Computer, Key, KeySeq, Quantity, quote
|
12 | 11 | from genno.core.key import single_key
|
13 | 12 | from iam_units import registry
|
14 |
| -from message_ix import make_df |
15 | 13 | from openpyxl import load_workbook
|
16 | 14 |
|
17 |
| -from message_ix_models.model.structure import get_codes |
18 | 15 | from message_ix_models.util import (
|
19 |
| - ScenarioInfo, |
20 |
| - broadcast, |
21 | 16 | cached,
|
22 | 17 | convert_units,
|
23 | 18 | make_matched_dfs,
|
24 |
| - nodes_ex_world, |
25 | 19 | package_data_path,
|
26 | 20 | same_node,
|
27 | 21 | same_time,
|
28 | 22 | series_of_pint_quantity,
|
29 | 23 | )
|
30 | 24 |
|
31 | 25 | from .non_ldv import UNITS
|
32 |
| -from .util import input_commodity_level |
33 |
| - |
34 |
| -if TYPE_CHECKING: |
35 |
| - from .config import Config |
36 | 26 |
|
37 | 27 | log = logging.getLogger(__name__)
|
38 | 28 |
|
@@ -219,7 +209,27 @@ def read_ikarus_data(occupancy, k_output, k_inv_cost):
|
219 | 209 |
|
220 | 210 |
|
221 | 211 | def prepare_computer(c: Computer):
|
222 |
| - """Prepare `c` to perform model data preparation using IKARUS data.""" |
| 212 | + """Prepare `c` to perform model data preparation using IKARUS data. |
| 213 | +
|
| 214 | +
| 215 | +
|
| 216 | + The data is read from ``GEAM_TRP_techinput.xlsx``, and the processed data is
| 217 | + exported into ``non_LDV_techs_wrapped.csv``. |
| 218 | +
|
| 219 | + .. note:: This supersedes the data preparation formerly performed by ``get_ikarus_data()``.
| 220 | +
|
| 221 | + Parameters
| 222 | + ----------
| 223 | + c : genno.Computer
| 224 | + The Computer to which tasks for preparing IKARUS data are added.
|
| 225 | + The prepared quantities cover MESSAGE parameters such as 'input' and
| 226 | + 'fix_cost'. Years in the data include the model horizon indicated by
| 227 | + :attr:`.Config.base_model_info`, plus the additional year 2010.
| 232 | + """ |
223 | 233 | # TODO identify whether capacity_factor is needed
|
224 | 234 | c.configure(rename_dims={"source": "source"})
|
225 | 235 |
|
@@ -337,152 +347,3 @@ def prepare_computer(c: Computer):
|
337 | 347 | # .non_ldv.prepare_computer() only if IKARUS is the selected data source for non-LDV
|
338 | 348 | # data. Other derived quantities (emissions factors) are also prepared there based
|
339 | 349 | # on these outputs.
|
340 |
| - |
341 |
| - |
342 |
| -def get_ikarus_data(context) -> Dict[str, pd.DataFrame]: |
343 |
| - """Prepare non-LDV data from :cite:`Martinsen2006`. |
344 |
| -
|
345 |
| - The data is read from from ``GEAM_TRP_techinput.xlsx``, and the processed data is |
346 |
| - exported into ``non_LDV_techs_wrapped.csv``. |
347 |
| -
|
348 |
| - .. note:: superseded by the computations set up by :func:`prepare_computer`. |
349 |
| -
|
350 |
| - Parameters |
351 |
| - ---------- |
352 |
| - context : .Context |
353 |
| -
|
354 |
| - Returns |
355 |
| - ------- |
356 |
| - data : dict of (str -> pandas.DataFrame) |
357 |
| - Keys are MESSAGE parameter names such as 'input', 'fix_cost'. |
358 |
| - Values are data frames ready for :meth:`~.Scenario.add_par`. |
359 |
| - Years in the data include the model horizon indicated by |
360 |
| - :attr:`.Config.base_model_info`, plus the additional year 2010. |
361 |
| - """ |
362 |
| - # Reference to the transport configuration |
363 |
| - config: "Config" = context.transport |
364 |
| - tech_info = config.spec.add.set["technology"] |
365 |
| - info = config.base_model_info |
366 |
| - |
367 |
| - # Merge with base model commodity information for io_units() below |
368 |
| - # TODO this duplicates code in .ldv; move to a common location |
369 |
| - all_info = ScenarioInfo() |
370 |
| - all_info.set["commodity"].extend(get_codes("commodity")) |
371 |
| - all_info.update(config.spec.add) |
372 |
| - |
373 |
| - # Retrieve the data from the spreadsheet. Use additional output efficiency and |
374 |
| - # investment cost factors for some bus technologies |
375 |
| - data = read_ikarus_data( |
376 |
| - occupancy=config.non_ldv_output, # type: ignore [attr-defined] |
377 |
| - k_output=config.efficiency["bus output"], |
378 |
| - k_inv_cost=config.cost["bus inv"], |
379 |
| - ) |
380 |
| - |
381 |
| - # Create data frames to add imported params to MESSAGEix |
382 |
| - |
383 |
| - # Vintage and active years from scenario info |
384 |
| - # Prepend years between 2010 and *firstmodelyear* so that values are saved |
385 |
| - missing_years = [x for x in info.set["year"] if (2010 <= x < info.y0)] |
386 |
| - vtg_years = missing_years + info.yv_ya["year_vtg"].tolist() |
387 |
| - act_years = missing_years + info.yv_ya["year_act"].tolist() |
388 |
| - |
389 |
| - # Default values to be used as args in make_df() |
390 |
| - defaults = dict( |
391 |
| - mode="all", |
392 |
| - year_act=act_years, |
393 |
| - year_vtg=vtg_years, |
394 |
| - time="year", |
395 |
| - time_origin="year", |
396 |
| - time_dest="year", |
397 |
| - ) |
398 |
| - |
399 |
| - # Dict of ('parameter name' -> [list of data frames]) |
400 |
| - dfs = defaultdict(list) |
401 |
| - |
402 |
| - # Iterate over each parameter and technology |
403 |
| - for (par, tec), group_data in data.groupby(["param", "technology"]): |
404 |
| - # Dict including the default values to be used as args in make_df() |
405 |
| - args = defaults.copy() |
406 |
| - args["technology"] = tec |
407 |
| - |
408 |
| - # Parameter-specific arguments/processing |
409 |
| - if par == "input": |
410 |
| - pass # Handled by input_commodity_level(), below |
411 |
| - elif par == "output": |
412 |
| - # Get the mode for a technology |
413 |
| - mode = tech_info[tech_info.index(tec)].parent.id |
414 |
| - args.update(dict(commodity=f"transport pax {mode.lower()}", level="useful")) |
415 |
| - |
416 |
| - # Units, as an abbreviated string |
417 |
| - _units = group_data.apply(lambda x: x.units).unique() |
418 |
| - assert len(_units) == 1, "Units must be unique per (tec, par)" |
419 |
| - units = _units[0] |
420 |
| - args["unit"] = f"{units:~}" |
421 |
| - |
422 |
| - # Create data frame with values from *args* |
423 |
| - df = make_df(par, **args) |
424 |
| - |
425 |
| - # Assign input commodity and level according to the technology |
426 |
| - if par == "input": |
427 |
| - df = input_commodity_level(context, df, default_level="final") |
428 |
| - |
429 |
| - # Copy data into the 'value' column, by vintage year |
430 |
| - for (year, *_), value in group_data.items(): |
431 |
| - df.loc[df["year_vtg"] == year, "value"] = value.magnitude |
432 |
| - |
433 |
| - # Drop duplicates. For parameters with 'year_vtg' but no 'year_act' dimension, |
434 |
| - # the same year_vtg appears multiple times because of the contents of *defaults* |
435 |
| - df.drop_duplicates(inplace=True) |
436 |
| - |
437 |
| - # Fill remaining values for the rest of vintage years with the last value |
438 |
| - # registered, in this case for 2030. |
439 |
| - df["value"] = df["value"].fillna(method="ffill") |
440 |
| - |
441 |
| - # Convert to the model's preferred input/output units for each commodity |
442 |
| - if par in ("input", "output"): |
443 |
| - target_units = df.apply( |
444 |
| - lambda row: all_info.io_units( |
445 |
| - row["technology"], row["commodity"], row["level"] |
446 |
| - ), |
447 |
| - axis=1, |
448 |
| - ).unique() |
449 |
| - assert 1 == len(target_units) |
450 |
| - else: |
451 |
| - target_units = [] |
452 |
| - |
453 |
| - if len(target_units): |
454 |
| - # FIXME improve convert_units() to handle more of these steps |
455 |
| - df["value"] = convert_units( |
456 |
| - df["value"], {"value": (1.0, units, target_units[0])} |
457 |
| - ) |
458 |
| - df["unit"] = f"{target_units[0]:~}" |
459 |
| - |
460 |
| - # Round up technical_lifetime values due to incompatibility in handling |
461 |
| - # non-integer values in the GAMS code |
462 |
| - if par == "technical_lifetime": |
463 |
| - df["value"] = df["value"].round() |
464 |
| - |
465 |
| - # Broadcast across all nodes |
466 |
| - dfs[par].append( |
467 |
| - df.pipe(broadcast, node_loc=nodes_ex_world(info.N)).pipe(same_node) |
468 |
| - ) |
469 |
| - |
470 |
| - # Concatenate data frames for each model parameter |
471 |
| - result = {par: pd.concat(list_of_df) for par, list_of_df in dfs.items()} |
472 |
| - |
473 |
| - # Capacity factors all 1.0 |
474 |
| - result.update(make_matched_dfs(result["output"], capacity_factor=1.0)) |
475 |
| - result["capacity_factor"]["unit"] = "" |
476 |
| - |
477 |
| - if context.get("debug", False): |
478 |
| - # Directory for debug output (if any) |
479 |
| - debug_dir = context.get_local_path("debug") |
480 |
| - # Ensure the directory |
481 |
| - debug_dir.mkdir(parents=True, exist_ok=True) |
482 |
| - |
483 |
| - for name, df in result.items(): |
484 |
| - target = debug_dir.joinpath(f"ikarus-{name}.csv") |
485 |
| - log.info(f"Dump data to {target}") |
486 |
| - df.to_csv(target, index=False) |
487 |
| - |
488 |
| - return result |
0 commit comments